incubator-ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From maha...@apache.org
Subject [2/2] git commit: AMBARI-3607. Ambari-Client create unit tests for methods of ambariClient class. (Andrew Onischuk via mahadev)
Date Mon, 28 Oct 2013 17:55:51 GMT
AMBARI-3607. Ambari-Client create unit tests for methods of ambariClient class. (Andrew Onischuk via mahadev)


Project: http://git-wip-us.apache.org/repos/asf/incubator-ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ambari/commit/b2d05706
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ambari/tree/b2d05706
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ambari/diff/b2d05706

Branch: refs/heads/trunk
Commit: b2d057065123d2e5b25b3c3592d878d1e7db3eb1
Parents: 399a63b
Author: Mahadev Konar <mahadev@apache.org>
Authored: Mon Oct 28 10:57:37 2013 -0700
Committer: Mahadev Konar <mahadev@apache.org>
Committed: Mon Oct 28 10:57:37 2013 -0700

----------------------------------------------------------------------
 .../src/test/python/TestAmbariClient.py         | 196 ++--
 .../src/test/python/TestClusterModel.py         |  22 +-
 .../json/ambariclient_bootstrap_hosts.json      |   5 +
 .../json/ambariclient_get_all_clusters.json     |  12 +
 .../python/json/ambariclient_get_all_hosts.json |  77 ++
 .../json/ambariclient_get_components.json       |  53 ++
 .../python/json/ambariclient_get_config.json    | 905 +++++++++++++++++++
 .../test/python/json/ambariclient_get_host.json |  63 ++
 .../json/clustermodel_error_deleting_host.json  |   4 +
 .../test/python/json/error_deleting_host.json   |   4 -
 .../src/test/python/json/get_all_clusters.json  |  12 -
 .../src/test/python/json/get_all_hosts.json     |  77 --
 .../src/test/python/json/get_host.json          |  63 --
 .../src/test/python/utils/HttpClientInvoker.py  | 143 +--
 14 files changed, 1340 insertions(+), 296 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/TestAmbariClient.py
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/TestAmbariClient.py b/ambari-client/src/test/python/TestAmbariClient.py
index 9bec45e..5e20957 100755
--- a/ambari-client/src/test/python/TestAmbariClient.py
+++ b/ambari-client/src/test/python/TestAmbariClient.py
@@ -21,11 +21,16 @@ limitations under the License.
 
 from mock.mock import MagicMock, patch
 from ambari_client.ambari_api import  AmbariClient 
-from ambari_client.core.errors import BadRequest
 from HttpClientInvoker import HttpClientInvoker
+from ambari_client.model.stack import StackConfigModel, StackComponentModel
 import unittest
 
 class TestAmbariClient(unittest.TestCase):
+  
+  def create_client(self, http_client_mock = MagicMock()):
+    http_client_mock.invoke.side_effect = HttpClientInvoker.http_client_invoke_side_effects
+    client = AmbariClient("localhost", 8080, "admin", "admin", version=1, client=http_client_mock)
+    return client
 
   def test_init(self):
     """
@@ -53,118 +58,149 @@ class TestAmbariClient(unittest.TestCase):
     self.assertEqual(client.version, 1, "version should be 1")
     self.assertEqual(client.host_url, "https://localhost:8443/api/v1",
                        "host_url should be https://localhost:8443/api/v1")
-    
-  
-    
-  @patch("ambari_client.core.http_client.HttpClient")  
-  def test_get_all_clusters_valid(self , http_client):
+      
+  def test_get_all_clusters(self):
     """
     Get all clusters.
     This testcase checks if get_all_clusters returns a list of ModelList.
     """
-    http_client_mock = MagicMock()
-    http_client.return_value = http_client_mock
-    
-    mocked_code = "200" 
-    mocked_content = "text/plain"
     expected_output = {'items': [{'cluster_name': u'test1', 'version': u'HDP-1.2.1'}]}
-    
-    linestring = open('json/get_all_clusters.json', 'r').read()
-    mocked_response = linestring
-    http_client_mock.invoke.return_value = mocked_response , mocked_code , mocked_content
-   
       
-    client = AmbariClient("localhost", 8080, "admin", "admin", version=1 , client=http_client_mock)
+    client = self.create_client()
     all_clusters = client.get_all_clusters()
       
-    self.assertEqual(len(all_clusters), 1, "There should be a cluster from the response")
-    self.assertEqual(all_clusters.to_json_dict(), expected_output, "to_json_dict should convert ModelList")
+    self.assertEqual(len(all_clusters), 1)
+    self.assertEqual(all_clusters.to_json_dict(), expected_output)
     
-  @patch("ambari_client.core.http_client.HttpClient")  
-  def test_get_all_hosts(self , http_client):
+  def test_get_cluster(self):
     """
-    Get all hosts.
-    This testcase checks if get_all_hosts returns a list of ModelList.
+    Get all clusters.
+    This testcase checks if get_all_clusters returns a list of ModelList.
     """
-    http_client_mock = MagicMock()
-    http_client.return_value = http_client_mock
+    expected_dict_output = {'cluster_name': u'test1', 'version': u'HDP-1.2.1'}
     
-    mocked_code = "200" 
-    mocked_content = "text/plain"
+    client = self.create_client()
+    cluster = client.get_cluster('test1')
     
-    linestring = open('json/get_all_hosts.json', 'r').read()
-    mocked_response = linestring
-    http_client_mock.invoke.return_value = mocked_response , mocked_code , mocked_content
-   
-      
-    client = AmbariClient("localhost", 8080, "admin", "admin", version=1 , client=http_client_mock)
-    all_hosts = client.get_all_hosts()
-      
-    self.assertEqual(len(all_hosts), 12, "There should be 12 hosts from the response")
+    self.assertEqual(cluster.cluster_name, "test1", "cluster_name should be test1 ")
+    self.assertEqual(cluster.to_json_dict(), expected_dict_output, "to_json_dict should convert ClusterModel")
     
-  @patch("ambari_client.core.http_client.HttpClient")  
-  def test_get_host_valid(self , http_client):
+  def test_get_host(self):
     """
     Get host
     This testcase checks if client.get_host returns a correct host
     """
-    http_client_mock = MagicMock()
-    http_client.returned_obj = http_client_mock
-    mocked_code = "200" 
-    mocked_content = "text/plain"
-    
-    linestring = open('json/get_host.json', 'r').read()
-    mocked_response = linestring
     expected_dict_output = {'ip': '10.0.2.15', 'host_name': 'dev06.hortonworks.com', 'rack_info': '/default-rack'}
     
-    http_client_mock.invoke.return_value = mocked_response , mocked_code , mocked_content
-    client = AmbariClient("localhost", 8080, "admin", "admin", version=1, client=http_client_mock)
+    client = self.create_client()
     host = client.get_host('dev06.hortonworks.com')
     
     self.assertEqual(host.to_json_dict(), expected_dict_output)
+    self.assertEqual(host.host_state, "HEARTBEAT_LOST")
+     
+  def test_get_all_hosts(self):
+    """
+    Get all hosts.
+    This testcase checks if get_all_hosts returns a list of ModelList.
+    """
+    expected_hosts_dict = {'items': [{'ip': None, 'host_name': u'apspal44-83', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-84', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-85', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-86', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-87', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-88', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'apspal44-89', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'r01hn01', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'r01mgt', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'r01wn01', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'r01wn02', 'rack_info': '/default-rack'}, {'ip': None, 'host_name': u'r01wn03', 'rack_info': '/default-rack'}]}
+      
+    client = self.create_client()
+    all_hosts = client.get_all_hosts()
+    
+    self.assertEqual(len(all_hosts), 12, "There should be 12 hosts from the response")
+    self.assertEqual(all_hosts.to_json_dict(), expected_hosts_dict)
     
-       
-  @patch("ambari_client.core.http_client.HttpClient")  
-  def test_get_cluster_valid(self , http_client):
+  def test_bootstrap_hosts(self):
     """
-    Get all clusters.
-    This testcase checks if get_all_clusters returns a list of ModelList.
+    Test Bootstrap
     """
     http_client_mock = MagicMock()
-    http_client.returned_obj = http_client_mock
-    mocked_code = "200" 
-    mocked_content = "text/plain"
     
-    linestring = open('json/clustermodel_get_cluster.json', 'r').read()
-    mocked_response = linestring
-    expected_dict_output = {'cluster_name': u'test1', 'version': u'HDP-1.2.1'}
+    ssh_key = 'abc!@#$%^&*()_:"|<>?[];\'\\./'
+    host_list = ['dev05.hortonworks.com','dev06.hortonworks.com']
     
-    http_client_mock.invoke.return_value = mocked_response , mocked_code , mocked_content
-    client = AmbariClient("localhost", 8080, "admin", "admin", version=1, client=http_client_mock)
-    cluster = client.get_cluster('test1')
+    expected_path = '//bootstrap'
+    expected_headers = {'Content-Type': 'application/json'}
+    expected_request = {'hosts': host_list, 'sshKey': 'abc!@#$%^&*()_:"|<>?[];\\\'\\\\./'}
+    expected_response = {'status': 201, 'message': u'Running Bootstrap now.', 'requestId': 5}
+                               
+    client = self.create_client(http_client_mock)
+    resp = client.bootstrap_hosts(host_list, ssh_key)
+
+    self.assertEqual(resp.to_json_dict(), expected_response)
+    http_client_mock.invoke.assert_called_with('POST', expected_path, headers=expected_headers, payload=expected_request)
+  
+  def test_create_cluster(self):
+    """
+    Test create cluster
+    """
+    http_client_mock = MagicMock()
     
-    self.assertEqual(cluster.cluster_name, "test1", "cluster_name should be test1 ")
-    self.assertEqual(cluster.to_json_dict(), expected_dict_output, "to_json_dict should convert ClusterModel")
+    expected_path = '//clusters/c1'
+    expected_request = {'Clusters': {'version': 'HDP-2.0.5'}}
+          
+    client = self.create_client(http_client_mock)
+    resp = client.create_cluster('c1', 'HDP-2.0.5')
     
-  @patch("ambari_client.core.http_client.HttpClient")  
-  def test_exceptions(self , http_client):
+    http_client_mock.invoke.assert_called_with('POST', expected_path, headers=None, payload=expected_request)
+    
+  def test_delete_cluster(self):
     """
-    Test exceptions from ambari.client.core.errors
+    Test create cluster
     """
     http_client_mock = MagicMock()
-    http_client.returned_obj = http_client_mock
-    mocked_code = "200" 
-    mocked_content = "text/plain"
     
-    http_client_mock.invoke.side_effect = HttpClientInvoker.http_client_invoke_side_effects
-    client = AmbariClient("localhost", 8080, "admin", "admin", version=1, client=http_client_mock)
-    cluster = client.get_cluster('test1')
+    expected_path = '//clusters/c1'
+    expected_request = None
+          
+    client = self.create_client(http_client_mock)
+    resp = client.delete_cluster('c1')
     
-    try:
-      cluster.delete_host('deleted_nonexistant_cluster')
-      print http_client_mock.invoke.call_args_list
-      self.fail('Exception should have been thrown!')
-    except BadRequest, ex:
-      self.assertEquals(str(ex), 'exception: 400. Attempted to add unknown hosts to a cluster.  These hosts have not been registered with the server: dev05')
-    except Exception, ex:
-      self.fail('Wrong exception thrown!')
+    http_client_mock.invoke.assert_called_with('DELETE', expected_path, headers=None, payload=expected_request)
+    
+  def test_delete_host(self):
+    """
+    Test delete host
+    """
+    http_client_mock = MagicMock()
+    
+    expected_path = '//hosts/abc.abc.abc'
+    expected_request = None
+          
+    client = self.create_client(http_client_mock)
+    resp = client.delete_host('abc.abc.abc')
+    
+    http_client_mock.invoke.assert_called_with('DELETE', expected_path, headers=None, payload=expected_request)
+    
+  def test_get_config(self):
+    """
+    Test get config
+    """
+    expected_dict = {'items': [{'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'datanode_du_reserved', 'property_value': u'1'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.access.time.precision', 'property_value': u'0'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.balance.bandwidthPerSec', 'property_value': u'6250000'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.block.access.token.enable', 'property_value': u'true'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.block.size', 'property_value': u'134217728'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.blockreport.initialDelay', 'property_value': u'120'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.cluster.administrators', 'property_value': u' hdfs'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'df
 s.datanode.du.pct', 'property_value': u'0.85f'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.datanode.failed.volumes.tolerated', 'property_value': u'0'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.datanode.ipc.address', 'property_value': u'0.0.0.0:8010'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.datanode.max.xcievers', 'property_value': u'4096'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.datanode.socket.write.timeout', 'property_value': u'0'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.heartbeat.interval', 'property_value': u'3'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.https.port', 'property_value': u'50470'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.namenode.avoid.read.stale.datanode', 'property_value': u'true'}, {'stack_version': u'1.3.0', 'servi
 ce_name': u'HDFS', 'property_name': u'dfs.namenode.avoid.write.stale.datanode', 'property_value': u'true'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.namenode.handler.count', 'property_value': u'100'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.namenode.handler.count', 'property_value': u'40'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.namenode.stale.datanode.interval', 'property_value': u'30000'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.namenode.write.stale.datanode.ratio', 'property_value': u'1.0f'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.permissions', 'property_value': u'true'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.permissions.supergroup', 'property_value': u'hdfs'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.replication.max', 'property_v
 alue': u'50'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.safemode.threshold.pct', 'property_value': u'1.0f'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.secondary.https.port', 'property_value': u'50490'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.umaskmode', 'property_value': u'077'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs.web.ugi', 'property_value': u'gopher,gopher'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_block_local_path_access_user', 'property_value': u'hbase'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_data_dir', 'property_value': u'/hadoop/hdfs/data'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_datanode_address', 'property_value': u'50010'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_datanode_data_dir_perm'
 , 'property_value': u'750'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_datanode_failed_volume_tolerated', 'property_value': u'0'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_datanode_http_address', 'property_value': u'50075'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_name_dir', 'property_value': u'/hadoop/hdfs/namenode'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_replication', 'property_value': u'3'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dfs_webhdfs_enabled', 'property_value': u'true'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'dtnode_heapsize', 'property_value': u'1024'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs.checkpoint.edits.dir', 'property_value': u'${fs.checkpoint.dir}'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs
 .checkpoint.period', 'property_value': u'21600'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs.checkpoint.size', 'property_value': u'536870912'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs.trash.interval', 'property_value': u'360'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs_checkpoint_dir', 'property_value': u'/hadoop/hdfs/namesecondary'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs_checkpoint_period', 'property_value': u'21600'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'fs_checkpoint_size', 'property_value': u'0.5'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'hadoop.security.authentication', 'property_value': u'simple'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'hadoop_heapsize', 'property_value': u'1024'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_
 name': u'hadoop_pid_dir_prefix', 'property_value': u'/var/run/hadoop'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'hdfs_log_dir_prefix', 'property_value': u'/var/log/hadoop'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'hdfs_user', 'property_value': u'hdfs'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'io.compression.codec.lzo.class', 'property_value': u'com.hadoop.compression.lzo.LzoCodec'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'io.compression.codecs', 'property_value': u'org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'io.file.buffer.size', 'property_value': u'131072'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'io.
 serializations', 'property_value': u'org.apache.hadoop.io.serializer.WritableSerialization'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'ipc.client.connect.max.retries', 'property_value': u'50'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'ipc.client.connection.maxidletime', 'property_value': u'30000'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'ipc.client.idlethreshold', 'property_value': u'8000'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'ipc.server.max.response.size', 'property_value': u'5242880'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'ipc.server.read.threadpool.size', 'property_value': u'5'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'kerberos_domain', 'property_value': u'EXAMPLE.COM'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'keytab_path', 'property_value': u'/etc/securit
 y/keytabs'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'namenode_formatted_mark_dir', 'property_value': u'/var/run/hadoop/hdfs/namenode/formatted/'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'namenode_heapsize', 'property_value': u'1024'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'namenode_opt_maxnewsize', 'property_value': u'640'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'namenode_opt_newsize', 'property_value': u'200'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'proxyuser_group', 'property_value': u'users'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.client.datanode.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.client.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name
 ': u'security.datanode.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.inter.datanode.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.inter.tracker.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.job.submission.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.namenode.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security.task.umbilical.protocol.acl', 'property_value': u'*'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'security_enabled', 'property_value': u'false'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'property_name': u'webinterface.private.actions', 'property_value': u'false'}]}
+    expected_first_item = StackConfigModel(None, property_name='datanode_du_reserved' , property_value='1' , service_name='HDFS' , stack_version='1.3.0')    
+    expected_request = None
+              
+    client = self.create_client()
+    configs = client.get_config('1.3.0','HDFS')
+    
+        
+    self.assertEquals(len(configs), 75)
+    self.assertEquals(str(configs[0]),str(expected_first_item))
+    self.assertEquals(configs.to_json_dict(), expected_dict)
+    
+  def test_get_components(self):
+    """
+    Test get components
+    """
+    expected_dict = {'items': [{'stack_version': u'1.3.0', 'service_name': u'HDFS', 'component_name': u'DATANODE'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'component_name': u'HDFS_CLIENT'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'component_name': u'NAMENODE'}, {'stack_version': u'1.3.0', 'service_name': u'HDFS', 'component_name': u'SECONDARY_NAMENODE'}]}
+    expected_first_item = StackComponentModel(None, component_name='DATANODE', service_name='HDFS' , stack_version='1.3.0')    
+    expected_request = None
+              
+    client = self.create_client()
+    components = client.get_components('1.3.0','HDFS')
+        
+    self.assertEquals(len(components), 4)
+    self.assertEquals(str(components[0]),str(expected_first_item))
+    self.assertEquals(components.to_json_dict(), expected_dict)
+  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/TestClusterModel.py
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/TestClusterModel.py b/ambari-client/src/test/python/TestClusterModel.py
index 3eeef1e..519419e 100644
--- a/ambari-client/src/test/python/TestClusterModel.py
+++ b/ambari-client/src/test/python/TestClusterModel.py
@@ -20,13 +20,15 @@ limitations under the License.
 
 
 from mock.mock import MagicMock, patch
-from ambari_client.ambari_api import  AmbariClient
 from HttpClientInvoker import HttpClientInvoker
+
+from ambari_client.ambari_api import  AmbariClient
 from ambari_client.model.host import HostModel
+from ambari_client.core.errors import BadRequest
 
 import unittest
 
-class TestAmbariClient(unittest.TestCase):
+class TestClusterModel(unittest.TestCase):
   
   def create_cluster(self, http_client_mock = MagicMock()):    
     http_client_mock.invoke.side_effect = HttpClientInvoker.http_client_invoke_side_effects
@@ -325,6 +327,22 @@ class TestAmbariClient(unittest.TestCase):
     self.assertEqual(cluster.cluster_name, "test1")
     http_client_mock.invoke.assert_called_with('DELETE', expected_path, headers=None, payload=None)
     
+    
+  def test_exceptions(self):
+    """
+    Test exceptions from ambari.client.core.errors
+    """
+    cluster = self.create_cluster()
+    
+    try:
+      cluster.delete_host('deleted_nonexistant_cluster')
+      print http_client_mock.invoke.call_args_list
+      self.fail('Exception should have been thrown!')
+    except BadRequest, ex:
+      self.assertEquals(str(ex), 'exception: 400. Attempted to add unknown hosts to a cluster.  These hosts have not been registered with the server: dev05')
+    except Exception, ex:
+      self.fail('Wrong exception thrown!')
+    
   def test_start_all_services(self):
     """
     Start all services

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_bootstrap_hosts.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_bootstrap_hosts.json b/ambari-client/src/test/python/json/ambariclient_bootstrap_hosts.json
new file mode 100644
index 0000000..1e60bd0
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_bootstrap_hosts.json
@@ -0,0 +1,5 @@
+{
+  "status" : "OK",
+  "log" : "Running Bootstrap now.",
+  "requestId" : 5
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_get_all_clusters.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_get_all_clusters.json b/ambari-client/src/test/python/json/ambariclient_get_all_clusters.json
new file mode 100644
index 0000000..abbf619
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_get_all_clusters.json
@@ -0,0 +1,12 @@
+{
+  "href" : "http://localhost:8080/api/v1/clusters",
+  "items" : [
+    {
+      "href" : "http://localhost:8080/api/v1/clusters/test1",
+      "Clusters" : {
+        "cluster_name" : "test1",
+        "version" : "HDP-1.2.1"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_get_all_hosts.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_get_all_hosts.json b/ambari-client/src/test/python/json/ambariclient_get_all_hosts.json
new file mode 100644
index 0000000..2aca5c4
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_get_all_hosts.json
@@ -0,0 +1,77 @@
+{
+  "href" : "http://localhost:8080/api/v1/hosts",
+  "items" : [
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-83",
+      "Hosts" : {
+        "host_name" : "apspal44-83"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-84",
+      "Hosts" : {
+        "host_name" : "apspal44-84"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-85",
+      "Hosts" : {
+        "host_name" : "apspal44-85"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-86",
+      "Hosts" : {
+        "host_name" : "apspal44-86"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-87",
+      "Hosts" : {
+        "host_name" : "apspal44-87"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-88",
+      "Hosts" : {
+        "host_name" : "apspal44-88"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/apspal44-89",
+      "Hosts" : {
+        "host_name" : "apspal44-89"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/r01hn01",
+      "Hosts" : {
+        "host_name" : "r01hn01"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/r01mgt",
+      "Hosts" : {
+        "host_name" : "r01mgt"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/r01wn01",
+      "Hosts" : {
+        "host_name" : "r01wn01"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/r01wn02",
+      "Hosts" : {
+        "host_name" : "r01wn02"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/hosts/r01wn03",
+      "Hosts" : {
+        "host_name" : "r01wn03"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_get_components.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_get_components.json b/ambari-client/src/test/python/json/ambariclient_get_components.json
new file mode 100644
index 0000000..56bb1b2
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_get_components.json
@@ -0,0 +1,53 @@
+{
+  "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/serviceComponents?fields=*",
+  "items" : [
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/serviceComponents/DATANODE",
+      "StackServiceComponents" : {
+        "component_category" : "SLAVE",
+        "component_name" : "DATANODE",
+        "is_client" : false,
+        "is_master" : false,
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/serviceComponents/HDFS_CLIENT",
+      "StackServiceComponents" : {
+        "component_category" : "CLIENT",
+        "component_name" : "HDFS_CLIENT",
+        "is_client" : true,
+        "is_master" : false,
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/serviceComponents/NAMENODE",
+      "StackServiceComponents" : {
+        "component_category" : "MASTER",
+        "component_name" : "NAMENODE",
+        "is_client" : false,
+        "is_master" : true,
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/serviceComponents/SECONDARY_NAMENODE",
+      "StackServiceComponents" : {
+        "component_category" : "MASTER",
+        "component_name" : "SECONDARY_NAMENODE",
+        "is_client" : false,
+        "is_master" : true,
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_get_config.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_get_config.json b/ambari-client/src/test/python/json/ambariclient_get_config.json
new file mode 100644
index 0000000..55f35bc
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_get_config.json
@@ -0,0 +1,905 @@
+{
+  "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
+      "StackConfigurations" : {
+        "property_description" : "Reserved space for HDFS",
+        "property_name" : "datanode_du_reserved",
+        "property_value" : "1",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
+      "StackConfigurations" : {
+        "property_description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
+        "property_name" : "dfs.access.time.precision",
+        "property_value" : "0",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
+        "property_name" : "dfs.balance.bandwidthPerSec",
+        "property_value" : "6250000",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+        "property_name" : "dfs.block.access.token.enable",
+        "property_value" : "true",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
+      "StackConfigurations" : {
+        "property_description" : "The default block size for new files.",
+        "property_name" : "dfs.block.size",
+        "property_value" : "134217728",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+      "StackConfigurations" : {
+        "property_description" : "Delay for first block report in seconds.",
+        "property_name" : "dfs.blockreport.initialDelay",
+        "property_value" : "120",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
+      "StackConfigurations" : {
+        "property_description" : "ACL for who all can view the default servlets in the HDFS",
+        "property_name" : "dfs.cluster.administrators",
+        "property_value" : " hdfs",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+      "StackConfigurations" : {
+        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
+        "property_name" : "dfs.datanode.du.pct",
+        "property_value" : "0.85f",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+      "StackConfigurations" : {
+        "property_description" : "Number of failed disks datanode would tolerate",
+        "property_name" : "dfs.datanode.failed.volumes.tolerated",
+        "property_value" : "0",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+      "StackConfigurations" : {
+        "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+        "property_name" : "dfs.datanode.ipc.address",
+        "property_value" : "0.0.0.0:8010",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+      "StackConfigurations" : {
+        "property_description" : "PRIVATE CONFIG VARIABLE",
+        "property_name" : "dfs.datanode.max.xcievers",
+        "property_value" : "4096",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+      "StackConfigurations" : {
+        "property_description" : "DFS Client write socket timeout",
+        "property_name" : "dfs.datanode.socket.write.timeout",
+        "property_value" : "0",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+      "StackConfigurations" : {
+        "property_description" : "Determines datanode heartbeat interval in seconds.",
+        "property_name" : "dfs.heartbeat.interval",
+        "property_value" : "3",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where namenode binds",
+        "property_name" : "dfs.https.port",
+        "property_value" : "50470",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.avoid.read.stale.datanode",
+      "StackConfigurations" : {
+        "property_description" : "\n      Indicate whether or not to avoid reading from stale datanodes whose\n      heartbeat messages have not been received by the namenode for more than a\n      specified time interval.\n    ",
+        "property_name" : "dfs.namenode.avoid.read.stale.datanode",
+        "property_value" : "true",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.avoid.write.stale.datanode",
+      "StackConfigurations" : {
+        "property_description" : "\n      Indicate whether or not to avoid writing to stale datanodes whose\n      heartbeat messages have not been received by the namenode for more than a\n      specified time interval.\n    ",
+        "property_name" : "dfs.namenode.avoid.write.stale.datanode",
+        "property_value" : "true",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "Added to grow Queue size so that more client connections are allowed",
+        "property_name" : "dfs.namenode.handler.count",
+        "property_value" : "100",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "The number of server threads for the namenode.",
+        "property_name" : "dfs.namenode.handler.count",
+        "property_value" : "40",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.stale.datanode.interval",
+      "StackConfigurations" : {
+        "property_description" : "Datanode is stale after not getting a heartbeat in this interval in ms",
+        "property_name" : "dfs.namenode.stale.datanode.interval",
+        "property_value" : "30000",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.write.stale.datanode.ratio",
+      "StackConfigurations" : {
+        "property_description" : "When the ratio of number stale datanodes to total datanodes marked is greater\n      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.\n    ",
+        "property_name" : "dfs.namenode.write.stale.datanode.ratio",
+        "property_value" : "1.0f",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+        "property_name" : "dfs.permissions",
+        "property_value" : "true",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+      "StackConfigurations" : {
+        "property_description" : "The name of the group of super-users.",
+        "property_name" : "dfs.permissions.supergroup",
+        "property_value" : "hdfs",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
+      "StackConfigurations" : {
+        "property_description" : "Maximal block replication.\n  ",
+        "property_name" : "dfs.replication.max",
+        "property_value" : "50",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
+        "property_name" : "dfs.safemode.threshold.pct",
+        "property_value" : "1.0f",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where secondary-namenode binds",
+        "property_name" : "dfs.secondary.https.port",
+        "property_value" : "50490",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
+      "StackConfigurations" : {
+        "property_description" : "\nThe octal umask used when creating files and directories.\n",
+        "property_name" : "dfs.umaskmode",
+        "property_value" : "077",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
+      "StackConfigurations" : {
+        "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+        "property_name" : "dfs.web.ugi",
+        "property_value" : "gopher,gopher",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_block_local_path_access_user",
+      "StackConfigurations" : {
+        "property_description" : "Default Block Replication.",
+        "property_name" : "dfs_block_local_path_access_user",
+        "property_value" : "hbase",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
+      "StackConfigurations" : {
+        "property_description" : "Data directories for Data Nodes.",
+        "property_name" : "dfs_data_dir",
+        "property_value" : "/hadoop/hdfs/data",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_address",
+      "StackConfigurations" : {
+        "property_description" : "Port for datanode address.",
+        "property_name" : "dfs_datanode_address",
+        "property_value" : "50010",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_data_dir_perm",
+      "StackConfigurations" : {
+        "property_description" : "Datanode dir perms.",
+        "property_name" : "dfs_datanode_data_dir_perm",
+        "property_value" : "750",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
+      "StackConfigurations" : {
+        "property_description" : "DataNode volumes failure toleration",
+        "property_name" : "dfs_datanode_failed_volume_tolerated",
+        "property_value" : "0",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_http_address",
+      "StackConfigurations" : {
+        "property_description" : "Port for datanode address.",
+        "property_name" : "dfs_datanode_http_address",
+        "property_value" : "50075",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
+      "StackConfigurations" : {
+        "property_description" : "NameNode Directories.",
+        "property_name" : "dfs_name_dir",
+        "property_value" : "/hadoop/hdfs/namenode",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_replication",
+      "StackConfigurations" : {
+        "property_description" : "Default Block Replication.",
+        "property_name" : "dfs_replication",
+        "property_value" : "3",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
+      "StackConfigurations" : {
+        "property_description" : "WebHDFS enabled",
+        "property_name" : "dfs_webhdfs_enabled",
+        "property_value" : "true",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "DataNode maximum Java heap size",
+        "property_name" : "dtnode_heapsize",
+        "property_value" : "1024",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+      "StackConfigurations" : {
+        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
+        "property_name" : "fs.checkpoint.edits.dir",
+        "property_value" : "${fs.checkpoint.dir}",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
+      "StackConfigurations" : {
+        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
+        "property_name" : "fs.checkpoint.period",
+        "property_value" : "21600",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
+        "property_name" : "fs.checkpoint.size",
+        "property_value" : "536870912",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
+      "StackConfigurations" : {
+        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
+        "property_name" : "fs.trash.interval",
+        "property_value" : "360",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
+      "StackConfigurations" : {
+        "property_description" : "Secondary NameNode checkpoint dir.",
+        "property_name" : "fs_checkpoint_dir",
+        "property_value" : "/hadoop/hdfs/namesecondary",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
+      "StackConfigurations" : {
+        "property_description" : "HDFS Maximum Checkpoint Delay",
+        "property_name" : "fs_checkpoint_period",
+        "property_value" : "21600",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
+      "StackConfigurations" : {
+        "property_description" : "FS Checkpoint Size.",
+        "property_name" : "fs_checkpoint_size",
+        "property_value" : "0.5",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop.security.authentication",
+      "StackConfigurations" : {
+        "property_description" : "\n   Set the authentication for the cluster. Valid values are: simple or\n   kerberos.\n   ",
+        "property_name" : "hadoop.security.authentication",
+        "property_value" : "simple",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop maximum Java heap size",
+        "property_name" : "hadoop_heapsize",
+        "property_value" : "1024",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop PID Dir Prefix",
+        "property_name" : "hadoop_pid_dir_prefix",
+        "property_value" : "/var/run/hadoop",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop Log Dir Prefix",
+        "property_name" : "hdfs_log_dir_prefix",
+        "property_value" : "/var/log/hadoop",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_user",
+      "StackConfigurations" : {
+        "property_description" : "User and Groups.",
+        "property_name" : "hdfs_user",
+        "property_value" : "hdfs",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+      "StackConfigurations" : {
+        "property_description" : "The implementation for lzo codec.",
+        "property_name" : "io.compression.codec.lzo.class",
+        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codecs",
+      "StackConfigurations" : {
+        "property_description" : "A list of the compression codec classes that can be used\n                 for compression/decompression.",
+        "property_name" : "io.compression.codecs",
+        "property_value" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
+        "property_name" : "io.file.buffer.size",
+        "property_value" : "131072",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_name" : "io.serializations",
+        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+      "StackConfigurations" : {
+        "property_description" : "Defines the maximum number of retries for IPC connections.",
+        "property_name" : "ipc.client.connect.max.retries",
+        "property_value" : "50",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+      "StackConfigurations" : {
+        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
+        "property_name" : "ipc.client.connection.maxidletime",
+        "property_value" : "30000",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+      "StackConfigurations" : {
+        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
+        "property_name" : "ipc.client.idlethreshold",
+        "property_value" : "8000",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_name" : "ipc.server.max.response.size",
+        "property_value" : "5242880",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_name" : "ipc.server.read.threadpool.size",
+        "property_value" : "5",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
+      "StackConfigurations" : {
+        "property_description" : "Kerberos realm.",
+        "property_name" : "kerberos_domain",
+        "property_value" : "EXAMPLE.COM",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
+      "StackConfigurations" : {
+        "property_description" : "Kerberos keytab path.",
+        "property_name" : "keytab_path",
+        "property_value" : "/etc/security/keytabs",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_formatted_mark_dir",
+      "StackConfigurations" : {
+        "property_description" : "Formatted Mark Directory.",
+        "property_name" : "namenode_formatted_mark_dir",
+        "property_value" : "/var/run/hadoop/hdfs/namenode/formatted/",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode Java heap size",
+        "property_name" : "namenode_heapsize",
+        "property_value" : "1024",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum new generation size",
+        "property_name" : "namenode_opt_maxnewsize",
+        "property_value" : "640",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode new generation size",
+        "property_name" : "namenode_opt_newsize",
+        "property_value" : "200",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/proxyuser_group",
+      "StackConfigurations" : {
+        "property_description" : "Proxy user group.",
+        "property_name" : "proxyuser_group",
+        "property_value" : "users",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.client.datanode.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.client.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.datanode.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.inter.datanode.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.inter.tracker.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communicate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.job.submission.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.namenode.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_name" : "security.task.umbilical.protocol.acl",
+        "property_value" : "*",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop Security",
+        "property_name" : "security_enabled",
+        "property_value" : "false",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
+      "StackConfigurations" : {
+        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
+        "property_name" : "webinterface.private.actions",
+        "property_value" : "false",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "core-site.xml"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/ambariclient_get_host.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/ambariclient_get_host.json b/ambari-client/src/test/python/json/ambariclient_get_host.json
new file mode 100644
index 0000000..d710626
--- /dev/null
+++ b/ambari-client/src/test/python/json/ambariclient_get_host.json
@@ -0,0 +1,63 @@
+{
+  "href" : "http://localhost:8080/api/v1/hosts/dev06.hortonworks.com",
+  "Hosts" : {
+    "cpu_count" : 4,
+    "desired_configs" : null,
+    "disk_info" : [
+      {
+        "available" : "45333752",
+        "used" : "5748252",
+        "percent" : "12%",
+        "size" : "51606140",
+        "type" : "ext4",
+        "mountpoint" : "/"
+      },
+      {
+        "available" : "5517976",
+        "used" : "272",
+        "percent" : "1%",
+        "size" : "5518248",
+        "type" : "tmpfs",
+        "mountpoint" : "/dev/shm"
+      },
+      {
+        "available" : "432210",
+        "used" : "38034",
+        "percent" : "9%",
+        "size" : "495844",
+        "type" : "ext4",
+        "mountpoint" : "/boot"
+      },
+      {
+        "available" : "44459840",
+        "used" : "184252",
+        "percent" : "1%",
+        "size" : "47033288",
+        "type" : "ext4",
+        "mountpoint" : "/home"
+      },
+      {
+        "available" : "136400692",
+        "used" : "840256712",
+        "percent" : "87%",
+        "size" : "976657404",
+        "type" : "vboxsf",
+        "mountpoint" : "/media/sf_share"
+      }
+    ],
+    "host_health_report" : "",
+    "host_name" : "dev06.hortonworks.com",
+    "host_state" : "HEARTBEAT_LOST",
+    "host_status" : "UNKNOWN",
+    "ip" : "10.0.2.15",
+    "last_agent_env" : null,
+    "last_heartbeat_time" : 0,
+    "last_registration_time" : 1378228232506,
+    "os_arch" : "x86_64",
+    "os_type" : "centos6",
+    "ph_cpu_count" : 1,
+    "public_host_name" : "dev06.hortonworks.com",
+    "rack_info" : "/default-rack",
+    "total_mem" : 11041505
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/clustermodel_error_deleting_host.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/clustermodel_error_deleting_host.json b/ambari-client/src/test/python/json/clustermodel_error_deleting_host.json
new file mode 100644
index 0000000..06d1bc9
--- /dev/null
+++ b/ambari-client/src/test/python/json/clustermodel_error_deleting_host.json
@@ -0,0 +1,4 @@
+{
+  "status" : 400,
+  "message" : "Attempted to add unknown hosts to a cluster.  These hosts have not been registered with the server: dev05"
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/error_deleting_host.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/error_deleting_host.json b/ambari-client/src/test/python/json/error_deleting_host.json
deleted file mode 100644
index 06d1bc9..0000000
--- a/ambari-client/src/test/python/json/error_deleting_host.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "status" : 400,
-  "message" : "Attempted to add unknown hosts to a cluster.  These hosts have not been registered with the server: dev05"
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/get_all_clusters.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/get_all_clusters.json b/ambari-client/src/test/python/json/get_all_clusters.json
deleted file mode 100755
index abbf619..0000000
--- a/ambari-client/src/test/python/json/get_all_clusters.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/v1/clusters",
-  "items" : [
-    {
-      "href" : "http://localhost:8080/api/v1/clusters/test1",
-      "Clusters" : {
-        "cluster_name" : "test1",
-        "version" : "HDP-1.2.1"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/get_all_hosts.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/get_all_hosts.json b/ambari-client/src/test/python/json/get_all_hosts.json
deleted file mode 100755
index 2aca5c4..0000000
--- a/ambari-client/src/test/python/json/get_all_hosts.json
+++ /dev/null
@@ -1,77 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/v1/hosts",
-  "items" : [
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-83",
-      "Hosts" : {
-        "host_name" : "apspal44-83"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-84",
-      "Hosts" : {
-        "host_name" : "apspal44-84"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-85",
-      "Hosts" : {
-        "host_name" : "apspal44-85"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-86",
-      "Hosts" : {
-        "host_name" : "apspal44-86"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-87",
-      "Hosts" : {
-        "host_name" : "apspal44-87"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-88",
-      "Hosts" : {
-        "host_name" : "apspal44-88"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/apspal44-89",
-      "Hosts" : {
-        "host_name" : "apspal44-89"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/r01hn01",
-      "Hosts" : {
-        "host_name" : "r01hn01"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/r01mgt",
-      "Hosts" : {
-        "host_name" : "r01mgt"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/r01wn01",
-      "Hosts" : {
-        "host_name" : "r01wn01"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/r01wn02",
-      "Hosts" : {
-        "host_name" : "r01wn02"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/r01wn03",
-      "Hosts" : {
-        "host_name" : "r01wn03"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/b2d05706/ambari-client/src/test/python/json/get_host.json
----------------------------------------------------------------------
diff --git a/ambari-client/src/test/python/json/get_host.json b/ambari-client/src/test/python/json/get_host.json
deleted file mode 100644
index d710626..0000000
--- a/ambari-client/src/test/python/json/get_host.json
+++ /dev/null
@@ -1,63 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/v1/hosts/dev06.hortonworks.com",
-  "Hosts" : {
-    "cpu_count" : 4,
-    "desired_configs" : null,
-    "disk_info" : [
-      {
-        "available" : "45333752",
-        "used" : "5748252",
-        "percent" : "12%",
-        "size" : "51606140",
-        "type" : "ext4",
-        "mountpoint" : "/"
-      },
-      {
-        "available" : "5517976",
-        "used" : "272",
-        "percent" : "1%",
-        "size" : "5518248",
-        "type" : "tmpfs",
-        "mountpoint" : "/dev/shm"
-      },
-      {
-        "available" : "432210",
-        "used" : "38034",
-        "percent" : "9%",
-        "size" : "495844",
-        "type" : "ext4",
-        "mountpoint" : "/boot"
-      },
-      {
-        "available" : "44459840",
-        "used" : "184252",
-        "percent" : "1%",
-        "size" : "47033288",
-        "type" : "ext4",
-        "mountpoint" : "/home"
-      },
-      {
-        "available" : "136400692",
-        "used" : "840256712",
-        "percent" : "87%",
-        "size" : "976657404",
-        "type" : "vboxsf",
-        "mountpoint" : "/media/sf_share"
-      }
-    ],
-    "host_health_report" : "",
-    "host_name" : "dev06.hortonworks.com",
-    "host_state" : "HEARTBEAT_LOST",
-    "host_status" : "UNKNOWN",
-    "ip" : "10.0.2.15",
-    "last_agent_env" : null,
-    "last_heartbeat_time" : 0,
-    "last_registration_time" : 1378228232506,
-    "os_arch" : "x86_64",
-    "os_type" : "centos6",
-    "ph_cpu_count" : 1,
-    "public_host_name" : "dev06.hortonworks.com",
-    "rack_info" : "/default-rack",
-    "total_mem" : 11041505
-  }
-}
\ No newline at end of file


Mime
View raw message