ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ababiic...@apache.org
Subject [12/19] ambari git commit: AMBARI-8918 Remove support for HDP 1.3 Stack in Ambari 2.0.0. (ababiichuk)
Date Thu, 25 Dec 2014 15:46:16 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/3ae9a5c4/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json b/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json
deleted file mode 100644
index 3c3f5ad..0000000
--- a/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json
+++ /dev/null
@@ -1,1181 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ambari.dfs.datanode.http.port",
-      "StackConfigurations" : {
-        "property_description" : "\n      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.\n    ",
-        "property_name" : "ambari.dfs.datanode.http.port",
-        "property_value" : "50075",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ambari.dfs.datanode.port",
-      "StackConfigurations" : {
-        "property_description" : "\n      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.\n    ",
-        "property_name" : "ambari.dfs.datanode.port",
-        "property_value" : "50010",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
-      "StackConfigurations" : {
-        "property_description" : "Reserved space for HDFS",
-        "property_name" : "datanode_du_reserved",
-        "property_value" : "1",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
-      "StackConfigurations" : {
-        "property_description" : "The access time for HDFS file is precise up to this value.\n      The default value is 1 hour. Setting a value of 0 disables\n      access times for HDFS.\n    ",
-        "property_name" : "dfs.access.time.precision",
-        "property_value" : "0",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
-      "StackConfigurations" : {
-        "property_description" : "\n      Specifies the maximum amount of bandwidth that each datanode\n      can utilize for the balancing purpose in term of\n      the number of bytes per second.\n    ",
-        "property_name" : "dfs.balance.bandwidthPerSec",
-        "property_value" : "6250000",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
-      "StackConfigurations" : {
-        "property_description" : "\n      If \"true\", access tokens are used as capabilities for accessing datanodes.\n      If \"false\", no access tokens are checked on accessing datanodes.\n    ",
-        "property_name" : "dfs.block.access.token.enable",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.local-path-access.user",
-      "StackConfigurations" : {
-        "property_description" : "the user who is allowed to perform short\n      circuit reads.\n    ",
-        "property_name" : "dfs.block.local-path-access.user",
-        "property_value" : "hbase",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
-      "StackConfigurations" : {
-        "property_description" : "The default block size for new files.",
-        "property_name" : "dfs.block.size",
-        "property_value" : "134217728",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
-      "StackConfigurations" : {
-        "property_description" : "Delay for first block report in seconds.",
-        "property_name" : "dfs.blockreport.initialDelay",
-        "property_value" : "120",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
-      "StackConfigurations" : {
-        "property_description" : "ACL for who all can view the default servlets in the HDFS",
-        "property_name" : "dfs.cluster.administrators",
-        "property_value" : " hdfs",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.data.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem an DFS data node\n      should store its blocks.  If this is a comma-delimited\n      list of directories, then data will be stored in all named\n      directories, typically on different devices.\n      Directories that do not exist are ignored.\n    ",
-        "property_name" : "dfs.data.dir",
-        "property_value" : "/hadoop/hdfs/data",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.address",
-      "StackConfigurations" : {
-        "property_description" : "\n      The datanode server address and port for data transfer.\n    ",
-        "property_name" : "dfs.datanode.address",
-        "property_value" : "0.0.0.0:${ambari.dfs.datanode.port}",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.data.dir.perm",
-      "StackConfigurations" : {
-        "property_description" : "The permissions that should be there on dfs.data.dir\n      directories. The datanode will not come up if the permissions are\n      different on existing dfs.data.dir directories. If the directories\n      don't exist, they will be created with this permission.",
-        "property_name" : "dfs.datanode.data.dir.perm",
-        "property_value" : "750",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
-      "StackConfigurations" : {
-        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n    ",
-        "property_name" : "dfs.datanode.du.pct",
-        "property_value" : "0.85f",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.reserved",
-      "StackConfigurations" : {
-        "property_description" : "Reserved space in bytes per volume. Always leave this much space free for non dfs use.\n    ",
-        "property_name" : "dfs.datanode.du.reserved",
-        "property_value" : "1073741824",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
-      "StackConfigurations" : {
-        "property_description" : "#of failed disks dn would tolerate",
-        "property_name" : "dfs.datanode.failed.volumes.tolerated",
-        "property_value" : "0",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
-      "StackConfigurations" : {
-        "property_description" : "Number of failed disks datanode would tolerate",
-        "property_name" : "dfs.datanode.failed.volumes.tolerated",
-        "property_value" : "0",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.http.address",
-      "StackConfigurations" : {
-        "property_description" : "\n      The datanode http server address and port.\n    ",
-        "property_name" : "dfs.datanode.http.address",
-        "property_value" : "0.0.0.0:${ambari.dfs.datanode.http.port}",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
-      "StackConfigurations" : {
-        "property_description" : "\n      The datanode ipc server address and port.\n      If the port is 0 then the server will start on a free port.\n    ",
-        "property_name" : "dfs.datanode.ipc.address",
-        "property_value" : "0.0.0.0:8010",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
-      "StackConfigurations" : {
-        "property_description" : "PRIVATE CONFIG VARIABLE",
-        "property_name" : "dfs.datanode.max.xcievers",
-        "property_value" : "4096",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
-      "StackConfigurations" : {
-        "property_description" : "DFS Client write socket timeout",
-        "property_name" : "dfs.datanode.socket.write.timeout",
-        "property_value" : "0",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
-      "StackConfigurations" : {
-        "property_description" : "Determines datanode heartbeat interval in seconds.",
-        "property_name" : "dfs.heartbeat.interval",
-        "property_value" : "3",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.hosts",
-      "StackConfigurations" : {
-        "property_description" : "Names a file that contains a list of hosts that are\n      permitted to connect to the namenode. The full pathname of the file\n      must be specified.  If the value is empty, all hosts are\n      permitted.",
-        "property_name" : "dfs.hosts",
-        "property_value" : "/etc/hadoop/conf/dfs.include",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.hosts.exclude",
-      "StackConfigurations" : {
-        "property_description" : "Names a file that contains a list of hosts that are\n      not permitted to connect to the namenode.  The full pathname of the\n      file must be specified.  If the value is empty, no hosts are\n      excluded.",
-        "property_name" : "dfs.hosts.exclude",
-        "property_value" : "/etc/hadoop/conf/dfs.exclude",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.http.address",
-      "StackConfigurations" : {
-        "property_description" : "The name of the default file system.  Either the\n      literal string \"local\" or a host:port for NDFS.",
-        "property_name" : "dfs.http.address",
-        "property_value" : "localhost:50070",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.address",
-      "StackConfigurations" : {
-        "property_description" : "The https address where namenode binds",
-        "property_name" : "dfs.https.address",
-        "property_value" : "localhost:50470",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
-      "StackConfigurations" : {
-        "property_description" : "\n      This property is used by HftpFileSystem.\n    ",
-        "property_name" : "dfs.https.port",
-        "property_value" : "50070",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where namenode binds",
-        "property_name" : "dfs.https.port",
-        "property_value" : "50470",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.name.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem the DFS name node\n      should store the name table.  If this is a comma-delimited list\n      of directories then the name table is replicated in all of the\n      directories, for redundancy. ",
-        "property_name" : "dfs.name.dir",
-        "property_value" : "/hadoop/hdfs/namenode",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.avoid.read.stale.datanode",
-      "StackConfigurations" : {
-        "property_description" : "\n      Indicate whether or not to avoid reading from stale datanodes whose\n      heartbeat messages have not been received by the namenode for more than a\n      specified time interval.\n    ",
-        "property_name" : "dfs.namenode.avoid.read.stale.datanode",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.avoid.write.stale.datanode",
-      "StackConfigurations" : {
-        "property_description" : "\n      Indicate whether or not to avoid writing to stale datanodes whose\n      heartbeat messages have not been received by the namenode for more than a\n      specified time interval.\n    ",
-        "property_name" : "dfs.namenode.avoid.write.stale.datanode",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "Added to grow Queue size so that more client connections are allowed",
-        "property_name" : "dfs.namenode.handler.count",
-        "property_value" : "100",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "The number of server threads for the namenode.",
-        "property_name" : "dfs.namenode.handler.count",
-        "property_value" : "40",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.stale.datanode.interval",
-      "StackConfigurations" : {
-        "property_description" : "Datanode is stale after not getting a heartbeat in this interval in ms",
-        "property_name" : "dfs.namenode.stale.datanode.interval",
-        "property_value" : "30000",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.write.stale.datanode.ratio",
-      "StackConfigurations" : {
-        "property_description" : "When the ratio of number stale datanodes to total datanodes marked is greater\n      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.\n    ",
-        "property_name" : "dfs.namenode.write.stale.datanode.ratio",
-        "property_value" : "1.0f",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
-      "StackConfigurations" : {
-        "property_description" : "\n      If \"true\", enable permission checking in HDFS.\n      If \"false\", permission checking is turned off,\n      but all other behavior is unchanged.\n      Switching from one parameter value to the other does not change the mode,\n      owner or group of files or directories.\n    ",
-        "property_name" : "dfs.permissions",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
-      "StackConfigurations" : {
-        "property_description" : "The name of the group of super-users.",
-        "property_name" : "dfs.permissions.supergroup",
-        "property_value" : "hdfs",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication",
-      "StackConfigurations" : {
-        "property_description" : "Default block replication.\n    ",
-        "property_name" : "dfs.replication",
-        "property_value" : "3",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
-      "StackConfigurations" : {
-        "property_description" : "Maximal block replication.\n    ",
-        "property_name" : "dfs.replication.max",
-        "property_value" : "50",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
-      "StackConfigurations" : {
-        "property_description" : "\n      Specifies the percentage of blocks that should satisfy\n      the minimal replication requirement defined by dfs.replication.min.\n      Values less than or equal to 0 mean not to start in safe mode.\n      Values greater than 1 will make safe mode permanent.\n    ",
-        "property_name" : "dfs.safemode.threshold.pct",
-        "property_value" : "1.0f",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.http.address",
-      "StackConfigurations" : {
-        "property_description" : "Address of secondary namenode web server",
-        "property_name" : "dfs.secondary.http.address",
-        "property_value" : "localhost:50090",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where secondary-namenode binds",
-        "property_name" : "dfs.secondary.https.port",
-        "property_value" : "50490",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.support.append",
-      "StackConfigurations" : {
-        "property_description" : "to enable dfs append",
-        "property_name" : "dfs.support.append",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
-      "StackConfigurations" : {
-        "property_description" : "\n      The octal umask used when creating files and directories.\n    ",
-        "property_name" : "dfs.umaskmode",
-        "property_value" : "077",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
-      "StackConfigurations" : {
-        "property_description" : "The user account used by the web interface.\n      Syntax: USERNAME,GROUP1,GROUP2, ...\n    ",
-        "property_name" : "dfs.web.ugi",
-        "property_value" : "gopher,gopher",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.webhdfs.enabled",
-      "StackConfigurations" : {
-        "property_description" : "to enable webhdfs",
-        "property_name" : "dfs.webhdfs.enabled",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_block_local_path_access_user",
-      "StackConfigurations" : {
-        "property_description" : "Default Block Replication.",
-        "property_name" : "dfs_block_local_path_access_user",
-        "property_value" : "hbase",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
-      "StackConfigurations" : {
-        "property_description" : "Data directories for Data Nodes.",
-        "property_name" : "dfs_data_dir",
-        "property_value" : "/hadoop/hdfs/data",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_address",
-      "StackConfigurations" : {
-        "property_description" : "Port for datanode address.",
-        "property_name" : "dfs_datanode_address",
-        "property_value" : "50010",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_data_dir_perm",
-      "StackConfigurations" : {
-        "property_description" : "Datanode dir perms.",
-        "property_name" : "dfs_datanode_data_dir_perm",
-        "property_value" : "750",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
-      "StackConfigurations" : {
-        "property_description" : "DataNode volumes failure toleration",
-        "property_name" : "dfs_datanode_failed_volume_tolerated",
-        "property_value" : "0",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_http_address",
-      "StackConfigurations" : {
-        "property_description" : "Port for datanode address.",
-        "property_name" : "dfs_datanode_http_address",
-        "property_value" : "50075",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
-      "StackConfigurations" : {
-        "property_description" : "NameNode Directories.",
-        "property_name" : "dfs_name_dir",
-        "property_value" : "/hadoop/hdfs/namenode",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_replication",
-      "StackConfigurations" : {
-        "property_description" : "Default Block Replication.",
-        "property_name" : "dfs_replication",
-        "property_value" : "3",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
-      "StackConfigurations" : {
-        "property_description" : "WebHDFS enabled",
-        "property_name" : "dfs_webhdfs_enabled",
-        "property_value" : "true",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "DataNode maximum Java heap size",
-        "property_name" : "dtnode_heapsize",
-        "property_value" : "1024",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary images to merge.\n        If this is a comma-delimited list of directories then the image is\n        replicated in all of the directories for redundancy.\n    ",
-        "property_name" : "fs.checkpoint.dir",
-        "property_value" : "/hadoop/hdfs/namesecondary",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directories then the edits are\n        replicated in all of the directories for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
-        "property_name" : "fs.checkpoint.edits.dir",
-        "property_value" : "${fs.checkpoint.dir}",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
-      "StackConfigurations" : {
-        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
-        "property_name" : "fs.checkpoint.period",
-        "property_value" : "21600",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
-        "property_name" : "fs.checkpoint.size",
-        "property_value" : "67108864",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.default.name",
-      "StackConfigurations" : {
-        "property_description" : "The name of the default file system.  Either the\n  literal string \"local\" or a host:port for NDFS.",
-        "property_name" : "fs.default.name",
-        "property_value" : "hdfs://localhost:8020",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
-      "StackConfigurations" : {
-        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
-        "property_name" : "fs.trash.interval",
-        "property_value" : "360",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
-      "StackConfigurations" : {
-        "property_description" : "Secondary NameNode checkpoint dir.",
-        "property_name" : "fs_checkpoint_dir",
-        "property_value" : "/hadoop/hdfs/namesecondary",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
-      "StackConfigurations" : {
-        "property_description" : "HDFS Maximum Checkpoint Delay",
-        "property_name" : "fs_checkpoint_period",
-        "property_value" : "21600",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
-      "StackConfigurations" : {
-        "property_description" : "FS Checkpoint Size.",
-        "property_name" : "fs_checkpoint_size",
-        "property_value" : "0.5",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop.security.authentication",
-      "StackConfigurations" : {
-        "property_description" : "\n   Set the authentication for the cluster. Valid values are: simple or\n   kerberos.\n   ",
-        "property_name" : "hadoop.security.authentication",
-        "property_value" : "simple",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop maximum Java heap size",
-        "property_name" : "hadoop_heapsize",
-        "property_value" : "1024",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop PID Dir Prefix",
-        "property_name" : "hadoop_pid_dir_prefix",
-        "property_value" : "/var/run/hadoop",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop Log Dir Prefix",
-        "property_name" : "hdfs_log_dir_prefix",
-        "property_value" : "/var/log/hadoop",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_user",
-      "StackConfigurations" : {
-        "property_description" : "User and Groups.",
-        "property_name" : "hdfs_user",
-        "property_value" : "hdfs",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
-      "StackConfigurations" : {
-        "property_description" : "The implementation for lzo codec.",
-        "property_name" : "io.compression.codec.lzo.class",
-        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codecs",
-      "StackConfigurations" : {
-        "property_description" : "A list of the compression codec classes that can be used\n                 for compression/decompression.",
-        "property_name" : "io.compression.codecs",
-        "property_value" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
-        "property_name" : "io.file.buffer.size",
-        "property_value" : "131072",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "io.serializations",
-        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
-      "StackConfigurations" : {
-        "property_description" : "Defines the maximum number of retries for IPC connections.",
-        "property_name" : "ipc.client.connect.max.retries",
-        "property_value" : "50",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
-      "StackConfigurations" : {
-        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
-        "property_name" : "ipc.client.connection.maxidletime",
-        "property_value" : "30000",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
-      "StackConfigurations" : {
-        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
-        "property_name" : "ipc.client.idlethreshold",
-        "property_value" : "8000",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "ipc.server.max.response.size",
-        "property_value" : "5242880",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
-      "StackConfigurations" : {
-        "property_description" : "",
-        "property_name" : "ipc.server.read.threadpool.size",
-        "property_value" : "5",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
-      "StackConfigurations" : {
-        "property_description" : "Kerberos realm.",
-        "property_name" : "kerberos_domain",
-        "property_value" : "EXAMPLE.COM",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
-      "StackConfigurations" : {
-        "property_description" : "Kerberos keytab path.",
-        "property_name" : "keytab_path",
-        "property_value" : "/etc/security/keytabs",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_formatted_mark_dir",
-      "StackConfigurations" : {
-        "property_description" : "Formatted Mark Directory.",
-        "property_name" : "namenode_formatted_mark_dir",
-        "property_value" : "/var/run/hadoop/hdfs/namenode/formatted/",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode Java heap size",
-        "property_name" : "namenode_heapsize",
-        "property_value" : "1024",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode maximum new generation size",
-        "property_name" : "namenode_opt_maxnewsize",
-        "property_value" : "200",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode new generation size",
-        "property_name" : "namenode_opt_newsize",
-        "property_value" : "200",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode maximum permanent generation size",
-        "property_name" : "namenode_opt_maxpermsize",
-        "property_value" : "256",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode permanent generation size",
-        "property_name" : "namenode_opt_permsize",
-        "property_value" : "128",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/proxyuser_group",
-      "StackConfigurations" : {
-        "property_description" : "Proxy user group.",
-        "property_name" : "proxyuser_group",
-        "property_value" : "users",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.client.datanode.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.client.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.datanode.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.inter.datanode.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.inter.tracker.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communicate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.job.submission.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.namenode.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_name" : "security.task.umbilical.protocol.acl",
-        "property_value" : "*",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop Security",
-        "property_name" : "security_enabled",
-        "property_value" : "false",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
-      "StackConfigurations" : {
-        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
-        "property_name" : "webinterface.private.actions",
-        "property_value" : "false",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "core-site.xml"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3ae9a5c4/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json b/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json
deleted file mode 100644
index bdc60d3..0000000
--- a/ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json
+++ /dev/null
@@ -1,533 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/ambari.hive.db.schema.name",
-      "StackConfigurations" : {
-        "property_description" : "Database name used as the Hive Metastore",
-        "property_name" : "ambari.hive.db.schema.name",
-        "property_value" : "hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.file.impl.disable.cache",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "fs.file.impl.disable.cache",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "fs.hdfs.impl.disable.cache",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hcat_log_dir",
-      "StackConfigurations" : {
-        "property_description" : "WebHCat Log Dir.",
-        "property_name" : "hcat_log_dir",
-        "property_value" : "/var/log/webhcat",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hcat_pid_dir",
-      "StackConfigurations" : {
-        "property_description" : "WebHCat Pid Dir.",
-        "property_name" : "hcat_pid_dir",
-        "property_value" : "/var/run/webhcat",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hcat_user",
-      "StackConfigurations" : {
-        "property_description" : "HCat User.",
-        "property_name" : "hcat_user",
-        "property_value" : "hcat",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.auto.convert.join",
-      "StackConfigurations" : {
-        "property_description" : "Whether Hive enable the optimization about converting common\n      join into mapjoin based on the input file size.",
-        "property_name" : "hive.auto.convert.join",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.auto.convert.join.noconditionaltask",
-      "StackConfigurations" : {
-        "property_description" : "Whether Hive enable the optimization about converting common join into mapjoin based on the input file\n      size. If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n      specified size, the join is directly converted to a mapjoin (there is no conditional task).\n    ",
-        "property_name" : "hive.auto.convert.join.noconditionaltask",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.auto.convert.join.noconditionaltask.size",
-      "StackConfigurations" : {
-        "property_description" : "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it\n      is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly\n      converted to a mapjoin (there is no conditional task). The default is 10MB.\n    ",
-        "property_name" : "hive.auto.convert.join.noconditionaltask.size",
-        "property_value" : "1000000000",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.auto.convert.sortmerge.join",
-      "StackConfigurations" : {
-        "property_description" : "Will the join be automatically converted to a sort-merge join, if the joined tables pass\n      the criteria for sort-merge join.\n    ",
-        "property_name" : "hive.auto.convert.sortmerge.join",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.auto.convert.sortmerge.join.noconditionaltask",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "hive.auto.convert.sortmerge.join.noconditionaltask",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.enforce.bucketing",
-      "StackConfigurations" : {
-        "property_description" : "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.",
-        "property_name" : "hive.enforce.bucketing",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.enforce.sorting",
-      "StackConfigurations" : {
-        "property_description" : "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.",
-        "property_name" : "hive.enforce.sorting",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.map.aggr",
-      "StackConfigurations" : {
-        "property_description" : "Whether to use map-side aggregation in Hive Group By queries.",
-        "property_name" : "hive.map.aggr",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.mapjoin.bucket.cache.size",
-      "StackConfigurations" : {
-        "property_description" : "\n      Size per reducer.The default is 1G, i.e if the input size is 10G, it\n      will use 10 reducers.\n    ",
-        "property_name" : "hive.mapjoin.bucket.cache.size",
-        "property_value" : "10000",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.mapred.reduce.tasks.speculative.execution",
-      "StackConfigurations" : {
-        "property_description" : "Whether speculative execution for reducers should be turned on.",
-        "property_name" : "hive.mapred.reduce.tasks.speculative.execution",
-        "property_value" : "false",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
-      "StackConfigurations" : {
-        "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
-        "property_name" : "hive.metastore.cache.pinobjtypes",
-        "property_value" : "Table,Database,Type,FieldSchema,Order",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
-      "StackConfigurations" : {
-        "property_description" : "MetaStore Client socket timeout in seconds",
-        "property_name" : "hive.metastore.client.socket.timeout",
-        "property_value" : "60",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
-      "StackConfigurations" : {
-        "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
-        "property_name" : "hive.metastore.execute.setugi",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.uris",
-      "StackConfigurations" : {
-        "property_description" : "URI for client to contact metastore server",
-        "property_name" : "hive.metastore.uris",
-        "property_value" : "thrift://localhost:9083",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
-      "StackConfigurations" : {
-        "property_description" : "location of default database for the warehouse",
-        "property_name" : "hive.metastore.warehouse.dir",
-        "property_value" : "/apps/hive/warehouse",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.optimize.bucketmapjoin",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "hive.optimize.bucketmapjoin",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.optimize.bucketmapjoin.sortedmerge",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "hive.optimize.bucketmapjoin.sortedmerge",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.optimize.mapjoin.mapreduce",
-      "StackConfigurations" : {
-        "property_description" : "If hive.auto.convert.join is off, this parameter does not take\n      affect. If it is on, and if there are map-join jobs followed by a map-reduce\n      job (for e.g a group by), each map-only job is merged with the following\n      map-reduce job.\n    ",
-        "property_name" : "hive.optimize.mapjoin.mapreduce",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.optimize.reducededuplication.min.reducer",
-      "StackConfigurations" : {
-        "property_description" : "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.\n      That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n      The optimization will be disabled if number of reducers is less than specified value.\n    ",
-        "property_name" : "hive.optimize.reducededuplication.min.reducer",
-        "property_value" : "1",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
-      "StackConfigurations" : {
-        "property_description" : "enable or disable the hive client authorization",
-        "property_name" : "hive.security.authorization.enabled",
-        "property_value" : "false",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
-      "StackConfigurations" : {
-        "property_description" : "the hive client authorization manager class name.\n      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
-        "property_name" : "hive.security.authorization.manager",
-        "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_name" : "hive.server2.enable.doAs",
-        "property_value" : "true",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_ambari_database",
-      "StackConfigurations" : {
-        "property_description" : "Database type.",
-        "property_name" : "hive_ambari_database",
-        "property_value" : "MySQL",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
-      "StackConfigurations" : {
-        "property_description" : "Hive auxiliary jar path.",
-        "property_name" : "hive_aux_jars_path",
-        "property_value" : "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hive Conf Dir.",
-        "property_name" : "hive_conf_dir",
-        "property_value" : "/etc/hive/conf",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_dbroot",
-      "StackConfigurations" : {
-        "property_description" : "Hive DB Directory.",
-        "property_name" : "hive_dbroot",
-        "property_value" : "/usr/lib/hive/lib/",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_lib",
-      "StackConfigurations" : {
-        "property_description" : "Hive Library.",
-        "property_name" : "hive_lib",
-        "property_value" : "/usr/lib/hive/lib/",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
-      "StackConfigurations" : {
-        "property_description" : "Directory for Hive Log files.",
-        "property_name" : "hive_log_dir",
-        "property_value" : "/var/log/hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_metastore_port",
-      "StackConfigurations" : {
-        "property_description" : "Hive Metastore port.",
-        "property_name" : "hive_metastore_port",
-        "property_value" : "9083",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_metastore_user_name",
-      "StackConfigurations" : {
-        "property_description" : "Database username to use to connect to the database.",
-        "property_name" : "hive_metastore_user_name",
-        "property_value" : "hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hive PID Dir.",
-        "property_name" : "hive_pid_dir",
-        "property_value" : "/var/run/hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_user",
-      "StackConfigurations" : {
-        "property_description" : "Hive User.",
-        "property_name" : "hive_user",
-        "property_value" : "hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
-      "StackConfigurations" : {
-        "property_description" : "Driver class name for a JDBC metastore",
-        "property_name" : "javax.jdo.option.ConnectionDriverName",
-        "property_value" : "com.mysql.jdbc.Driver",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionPassword",
-      "StackConfigurations" : {
-        "property_description" : "password to use against metastore database",
-        "property_name" : "javax.jdo.option.ConnectionPassword",
-        "property_value" : " ",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionURL",
-      "StackConfigurations" : {
-        "property_description" : "JDBC connect string for a JDBC metastore",
-        "property_name" : "javax.jdo.option.ConnectionURL",
-        "property_value" : "jdbc",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionUserName",
-      "StackConfigurations" : {
-        "property_description" : "username to use against metastore database",
-        "property_name" : "javax.jdo.option.ConnectionUserName",
-        "property_value" : "hive",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
-      "StackConfigurations" : {
-        "property_description" : "Hive PID Dir.",
-        "property_name" : "mysql_connector_url",
-        "property_value" : "${download_url}/mysql-connector-java-5.1.18.zip",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/webhcat_user",
-      "StackConfigurations" : {
-        "property_description" : "WebHCat User.",
-        "property_name" : "webhcat_user",
-        "property_value" : "hcat",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "stack_version" : "1.3.0",
-        "type" : "global.xml"
-      }
-    }
-  ]
-}


Mime
View raw message