ambari-commits mailing list archives

From: ababiic...@apache.org
Subject: [2/3] ambari git commit: AMBARI-10082 Enhanced Configs: Create mapper for loading config versions and config properties. (ababiichuk)
Date: Mon, 16 Mar 2015 17:29:53 GMT
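
For context, the fixture added below mocks the /configurations/service_config_versions/ endpoint (its "href" appears in the JSON) that the new mapper loads. What follows is a minimal sketch of consuming that endpoint, not the mapper code from this commit; loadConfigVersions and the narrow interface are illustrative assumptions covering only fields visible in the fixture.

    // Hypothetical sketch: fetch service config versions and index them by service name.
    // The interface models only the fixture fields used here.
    interface ServiceConfigVersion {
      service_name: string;
      service_config_version: number;
      is_current: boolean;
      group_name: string;
      configurations: Array<{
        type: string;
        tag: string;
        properties: Record<string, string>;
      }>;
    }

    async function loadConfigVersions(
      clusterUrl: string // e.g. "http://c6401:8080/api/v1/clusters/1", as in the fixture's href
    ): Promise<Map<string, ServiceConfigVersion[]>> {
      const resp = await fetch(`${clusterUrl}/configurations/service_config_versions/`);
      const body = (await resp.json()) as { items: ServiceConfigVersion[] };
      // Group versions by service (the fixture holds AMBARI_METRICS plus several HDFS versions).
      const byService = new Map<string, ServiceConfigVersion[]>();
      for (const item of body.items) {
        const list = byService.get(item.service_name) ?? [];
        list.push(item);
        byService.set(item.service_name, list);
      }
      return byService;
    }
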
http://git-wip-us.apache.org/repos/asf/ambari/blob/34c3951c/ambari-web/app/assets/data/configurations/config_versions.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/configurations/config_versions.json b/ambari-web/app/assets/data/configurations/config_versions.json
new file mode 100644
index 0000000..603d33e
--- /dev/null
+++ b/ambari-web/app/assets/data/configurations/config_versions.json
@@ -0,0 +1,1894 @@
+{
+  "href" : "http://c6401:8080/api/v1/clusters/1/configurations/service_config_versions/",
+  "items" : [
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-hbase-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbas
 e.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupInd
 ex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppend
 er\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable t
 racing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Define some default values that can be overridden by system properties\nams.log.dir=.\nams.log.file=ambari-metrics-collector.log\n\n# Root logger option\nlog
 4j.rootLogger=INFO,file\n\n# Direct log messages to a log file\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File=${ams.log.dir}/${ams.log.file}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-hbase-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "hbase.client.scanner.caching" : "10000",
+            "hbase.client.scanner.timeout.period" : "900000",
+            "hbase.cluster.distributed" : "false",
+            "hbase.hregion.majorcompaction" : "0",
+            "hbase.hregion.memstore.block.multiplier" : "4",
+            "hbase.hregion.memstore.flush.size" : "134217728",
+            "hbase.hstore.blockingStoreFiles" : "200",
+            "hbase.hstore.flusher.count" : "2",
+            "hbase.local.dir" : "${hbase.tmp.dir}/local",
+            "hbase.master.info.bindAddress" : "0.0.0.0",
+            "hbase.master.info.port" : "61310",
+            "hbase.master.port" : "61300",
+            "hbase.master.wait.on.regionservers.mintostart" : "1",
+            "hbase.regionserver.global.memstore.lowerLimit" : "0.4",
+            "hbase.regionserver.global.memstore.upperLimit" : "0.5",
+            "hbase.regionserver.info.port" : "61330",
+            "hbase.regionserver.port" : "61320",
+            "hbase.regionserver.thread.compaction.large" : "2",
+            "hbase.regionserver.thread.compaction.small" : "3",
+            "hbase.replication" : "false",
+            "hbase.rootdir" : "file:///var/lib/ambari-metrics-collector/hbase",
+            "hbase.snapshot.enabled" : "false",
+            "hbase.tmp.dir" : "/var/lib/ambari-metrics-collector/hbase-tmp",
+            "hbase.zookeeper.leaderport" : "61388",
+            "hbase.zookeeper.peerport" : "61288",
+            "hbase.zookeeper.property.clientPort" : "61181",
+            "hbase.zookeeper.property.dataDir" : "${hbase.tmp.dir}/zookeeper",
+            "hbase.zookeeper.quorum" : "{{zookeeper_quorum_hosts}}",
+            "hfile.block.cache.size" : "0.3",
+            "phoenix.groupby.maxCacheSize" : "307200000",
+            "phoenix.query.spoolThresholdBytes" : "12582912",
+            "phoenix.query.timeoutMs" : "1200000",
+            "phoenix.sequence.saltBuckets" : "2",
+            "zookeeper.session.timeout" : "120000"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-hbase-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.masterregion.protocol.acl" : "*"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "timeline.metrics.aggregator.checkpoint.dir" : "/var/lib/ambari-metrics-collector/checkpoint",
+            "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier" : "2",
+            "timeline.metrics.cluster.aggregator.hourly.disabled" : "false",
+            "timeline.metrics.cluster.aggregator.hourly.interval" : "3600",
+            "timeline.metrics.cluster.aggregator.hourly.ttl" : "31536000",
+            "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier" : "2",
+            "timeline.metrics.cluster.aggregator.minute.disabled" : "false",
+            "timeline.metrics.cluster.aggregator.minute.interval" : "120",
+            "timeline.metrics.cluster.aggregator.minute.timeslice.interval" : "15",
+            "timeline.metrics.cluster.aggregator.minute.ttl" : "2592000",
+            "timeline.metrics.hbase.compression.scheme" : "SNAPPY",
+            "timeline.metrics.hbase.data.block.encoding" : "FAST_DIFF",
+            "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier" : "2",
+            "timeline.metrics.host.aggregator.hourly.disabled" : "false",
+            "timeline.metrics.host.aggregator.hourly.interval" : "3600",
+            "timeline.metrics.host.aggregator.hourly.ttl" : "2592000",
+            "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier" : "2",
+            "timeline.metrics.host.aggregator.minute.disabled" : "false",
+            "timeline.metrics.host.aggregator.minute.interval" : "300",
+            "timeline.metrics.host.aggregator.minute.ttl" : "604800",
+            "timeline.metrics.host.aggregator.ttl" : "86400",
+            "timeline.metrics.service.checkpointDelay" : "60",
+            "timeline.metrics.service.default.result.limit" : "5760",
+            "timeline.metrics.service.operation.mode" : "embedded",
+            "timeline.metrics.service.resultset.fetchSize" : "2000",
+            "timeline.metrics.service.rpc.address" : "0.0.0.0:60200",
+            "timeline.metrics.service.webapp.address" : "0.0.0.0:6188"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "ambari_metrics_user" : "ams",
+            "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}\n\n# AMS Collector options\nexport AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native\"\n{% if security_enabled %}\nexport AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}}\"\n{% endif %}",
+            "metrics_collector_log_dir" : "/var/log/ambari-metrics-collector",
+            "metrics_collector_pid_dir" : "/var/run/ambari-metrics-collector",
+            "metrics_monitor_log_dir" : "/var/log/ambari-metrics-monitor",
+            "metrics_monitor_pid_dir" : "/var/run/ambari-metrics-monitor"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-hbase-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HBASE_HEAPSIZE={{hbase_heapsize}}\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OP
 TS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexpo
 rt HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.aut
 h.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\nexport HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n{% endif %}\n\n# use embedded native libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}\"\n\n# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME={{ams_hbase_home_dir}}",
+            "hbase_log_dir" : "/var/log/ambari-metrics-collector",
+            "hbase_master_heapsize" : "1024m",
+            "hbase_pid_dir" : "/var/run/ambari-metrics-collector/",
+            "hbase_regionserver_heapsize" : "1024m",
+            "hbase_regionserver_xmn_max" : "512m",
+            "hbase_regionserver_xmn_ratio" : "0.2"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ams-hbase-security-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "ams.zookeeper.keytab" : "",
+            "ams.zookeeper.principal" : "",
+            "hadoop.security.authentication" : "",
+            "hbase.coprocessor.master.classes" : "",
+            "hbase.coprocessor.region.classes" : "",
+            "hbase.master.kerberos.principal" : "",
+            "hbase.master.keytab.file" : "",
+            "hbase.myclient.keytab" : "",
+            "hbase.myclient.principal" : "",
+            "hbase.regionserver.kerberos.principal" : "",
+            "hbase.regionserver.keytab.file" : "",
+            "hbase.security.authentication" : "",
+            "hbase.security.authorization" : "",
+            "hbase.zookeeper.property.authProvider.1" : "",
+            "hbase.zookeeper.property.jaasLoginRenew" : "",
+            "hbase.zookeeper.property.kerberos.removeHostFromPrincipal" : "",
+            "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal" : "",
+            "zookeeper.znode.parent" : ""
+          },
+          "properties_attributes" : { }
+        }
+      ],
+      "createtime" : 1425979244738,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : true,
+      "service_config_version" : 1,
+      "service_config_version_note" : "Initial configurations for Ambari Metrics",
+      "service_name" : "AMBARI_METRICS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1426088081862",
+          "version" : 2,
+          "properties" : {
+            "dtnode_heapsize" : "1026m"
+          },
+          "properties_attributes" : { }
+        }
+      ],
+      "createtime" : 1426088137115,
+      "group_id" : 2,
+      "group_name" : "1",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 4,
+      "service_config_version_note" : "",
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1426088081862",
+          "version" : 2,
+          "properties" : {
+            "dtnode_heapsize" : "1026m"
+          },
+          "properties_attributes" : { }
+        }
+      ],
+      "createtime" : 1426159024121,
+      "group_id" : 2,
+      "group_name" : "1",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 5,
+      "service_config_version_note" : null,
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1426088081862",
+          "version" : 2,
+          "properties" : {
+            "dtnode_heapsize" : "1026m"
+          },
+          "properties_attributes" : { }
+        }
+      ],
+      "createtime" : 1426159024338,
+      "group_id" : 2,
+      "group_name" : "1",
+      "hosts" : [
+        "c6401.ambari.apache.org"
+      ],
+      "is_current" : true,
+      "service_config_version" : 6,
+      "service_config_version_note" : null,
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.log
 ger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit
 =false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.f
 ile}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\
 nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
 ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateSta
 mps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The followin
 g applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HA
 DOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share
 /java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP
 _OPTS\"",
+            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1024m",
+            "namenode_opt_maxnewsize" : "256m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "256m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ranger-hdfs-plugin-properties",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "REPOSITORY_CONFIG_PASSWORD" : "hadoop",
+            "REPOSITORY_CONFIG_USERNAME" : "hadoop",
+            "SSL_KEYSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "SSL_KEYSTORE_PASSWORD" : "myKeyFilePassword",
+            "SSL_TRUSTSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "SSL_TRUSTSTORE_PASSWORD" : "changeit",
+            "XAAUDIT.DB.IS_ENABLED" : "true",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY" : "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.DESTINTATION_FILE" : "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS" : "900",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS" : "86400",
+            "XAAUDIT.HDFS.IS_ENABLED" : "false",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT" : "10",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE" : "%time:yyyyMMdd-HHmm.ss%.log",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS" : "600",
+            "common.name.for.certificate" : "-",
+            "hadoop.rpc.protection" : "-",
+            "policy_user" : "ambari-qa",
+            "ranger-hdfs-plugin-enabled" : "No"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "core-site",
+          "tag" : "version1426061654803",
+          "version" : 2,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://ha1",
+            "fs.trash.interval" : "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries" : "120",
+            "ha.zookeeper.quorum" : "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181",
+            "hadoop.http.authentication.simple.anonymous.allowed" : "true",
+            "hadoop.proxyuser.hcat.groups" : "users",
+            "hadoop.proxyuser.hcat.hosts" : "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups" : "users",
+            "hadoop.proxyuser.hive.hosts" : "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local" : "\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : "org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1426061654803",
+          "version" : 2,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.failover.proxy.provider.ha1" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "0",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "4096",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.ha.automatic-failover.enabled" : "true",
+            "dfs.ha.fencing.methods" : "shell(/bin/true)",
+            "dfs.ha.namenodes.ha1" : "nn1,nn2",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/hadoop/hdfs/journal",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.journalnode.https-address" : "0.0.0.0:8481",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "100",
+            "dfs.namenode.http-address" : "c6401.ambari.apache.org:50070",
+            "dfs.namenode.http-address.ha1.nn1" : "c6401.ambari.apache.org:50070",
+            "dfs.namenode.http-address.ha1.nn2" : "c6402.ambari.apache.org:50070",
+            "dfs.namenode.https-address" : "c6401.ambari.apache.org:50470",
+            "dfs.namenode.https-address.ha1.nn1" : "c6401.ambari.apache.org:50470",
+            "dfs.namenode.https-address.ha1.nn2" : "c6402.ambari.apache.org:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.rpc-address.ha1.nn1" : "c6401.ambari.apache.org:8020",
+            "dfs.namenode.rpc-address.ha1.nn2" : "c6402.ambari.apache.org:8020",
+            "dfs.namenode.safemode.threshold-pct" : "0.99f",
+            "dfs.namenode.shared.edits.dir" : "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ha1",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.startup.delay.block.deletion.sec" : "3600",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.nameservices" : "ha1",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        }
+      ],
+      "createtime" : 1426061710737,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 2,
+      "service_config_version_note" : "This configuration is created by Enable NameNode HA wizard",
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.log
 ger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit
 =false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.f
 ile}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\
 nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
+            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1024m",
+            "namenode_opt_maxnewsize" : "256m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "256m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ranger-hdfs-plugin-properties",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "REPOSITORY_CONFIG_PASSWORD" : "hadoop",
+            "REPOSITORY_CONFIG_USERNAME" : "hadoop",
+            "SSL_KEYSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "SSL_KEYSTORE_PASSWORD" : "myKeyFilePassword",
+            "SSL_TRUSTSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "SSL_TRUSTSTORE_PASSWORD" : "changeit",
+            "XAAUDIT.DB.IS_ENABLED" : "true",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY" : "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.DESTINTATION_FILE" : "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS" : "900",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS" : "86400",
+            "XAAUDIT.HDFS.IS_ENABLED" : "false",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT" : "10",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE" : "%time:yyyyMMdd-HHmm.ss%.log",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS" : "600",
+            "common.name.for.certificate" : "-",
+            "hadoop.rpc.protection" : "-",
+            "policy_user" : "ambari-qa",
+            "ranger-hdfs-plugin-enabled" : "No"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "core-site",
+          "tag" : "version1426061654803",
+          "version" : 2,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://ha1",
+            "fs.trash.interval" : "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries" : "120",
+            "ha.zookeeper.quorum" : "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181",
+            "hadoop.http.authentication.simple.anonymous.allowed" : "true",
+            "hadoop.proxyuser.hcat.groups" : "users",
+            "hadoop.proxyuser.hcat.hosts" : "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups" : "users",
+            "hadoop.proxyuser.hive.hosts" : "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local" : "\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : "org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1426064855761",
+          "version" : 3,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.failover.proxy.provider.ha1" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "1",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "4096",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.ha.automatic-failover.enabled" : "true",
+            "dfs.ha.fencing.methods" : "shell(/bin/true)",
+            "dfs.ha.namenodes.ha1" : "nn1,nn2",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/hadoop/hdfs/journal",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.journalnode.https-address" : "0.0.0.0:8481",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "100",
+            "dfs.namenode.http-address" : "c6401.ambari.apache.org:50070",
+            "dfs.namenode.http-address.ha1.nn1" : "c6401.ambari.apache.org:50070",
+            "dfs.namenode.http-address.ha1.nn2" : "c6402.ambari.apache.org:50070",
+            "dfs.namenode.https-address" : "c6401.ambari.apache.org:50470",
+            "dfs.namenode.https-address.ha1.nn1" : "c6401.ambari.apache.org:50470",
+            "dfs.namenode.https-address.ha1.nn2" : "c6402.ambari.apache.org:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.rpc-address.ha1.nn1" : "c6401.ambari.apache.org:8020",
+            "dfs.namenode.rpc-address.ha1.nn2" : "c6402.ambari.apache.org:8020",
+            "dfs.namenode.safemode.threshold-pct" : "0.99f",
+            "dfs.namenode.shared.edits.dir" : "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ha1",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.startup.delay.block.deletion.sec" : "3600",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.nameservices" : "ha1",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        }
+      ],
+      "createtime" : 1426064911622,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : true,
+      "service_config_version" : 3,
+      "service_config_version_note" : "",
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.log
 ger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit
 =false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.f
 ile}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\
 nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "0",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "4096",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/hadoop/hdfs/journalnode",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.journalnode.https-address" : "0.0.0.0:8481",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : "${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "100",
+            "dfs.namenode.http-address" : "c6401.ambari.apache.org:50070",
+            "dfs.namenode.https-address" : "c6401.ambari.apache.org:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.safemode.threshold-pct" : "1.0f",
+            "dfs.namenode.secondary.http-address" : "c6402.ambari.apache.org:50090",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.startup.delay.block.deletion.sec" : "3600",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
 ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateSta
 mps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The followin
 g applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HA
 DOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share
 /java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP
 _OPTS\"",
+            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1024m",
+            "namenode_opt_maxnewsize" : "256m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "256m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "ranger-hdfs-plugin-properties",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "REPOSITORY_CONFIG_PASSWORD" : "hadoop",
+            "REPOSITORY_CONFIG_USERNAME" : "hadoop",
+            "SSL_KEYSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "SSL_KEYSTORE_PASSWORD" : "myKeyFilePassword",
+            "SSL_TRUSTSTORE_FILE_PATH" : "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "SSL_TRUSTSTORE_PASSWORD" : "changeit",
+            "XAAUDIT.DB.IS_ENABLED" : "true",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY" : "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.DESTINTATION_FILE" : "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS" : "900",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS" : "86400",
+            "XAAUDIT.HDFS.IS_ENABLED" : "false",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT" : "10",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY" : "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE" : "%time:yyyyMMdd-HHmm.ss%.log",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS" : "60",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS" : "600",
+            "common.name.for.certificate" : "-",
+            "hadoop.rpc.protection" : "-",
+            "policy_user" : "ambari-qa",
+            "ranger-hdfs-plugin-enabled" : "No"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "core-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://c6401.ambari.apache.org:8020",
+            "fs.trash.interval" : "360",
+            "ha.failover-controller.active-standby-elector.zk.op.retries" : "120",
+            "hadoop.http.authentication.simple.anonymous.allowed" : "true",
+            "hadoop.proxyuser.hcat.groups" : "users",
+            "hadoop.proxyuser.hcat.hosts" : "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups" : "users",
+            "hadoop.proxyuser.hive.hosts" : "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local" : "\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : "org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
+        }
+      ],
+      "createtime" : 1425979245514,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 1,
+      "service_config_version_note" : "Initial configurations for HDFS",
+      "service_name" : "HDFS",
+      "user" : "admin"
+    },
+    {
+      "cluster_name" : "1",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hiveserver2-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "hive.security.authenticator.manager" : "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
+            "hive.security.authorization.manager" : "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "1"
+          },
+          "type" : "hive-env",
+          "tag" : "version1",
+         

<TRUNCATED>
