ambari-commits mailing list archives

From jai...@apache.org
Subject [2/2] git commit: AMBARI-3717: Config Refactor: Properties with hostnames in their values should also be surfaced on web-ui. (jaimin)
Date Thu, 07 Nov 2013 22:50:44 GMT
AMBARI-3717: Config Refactor: Properties with hostnames in their values should also be surfaced on web-ui. (jaimin)


Project: http://git-wip-us.apache.org/repos/asf/incubator-ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ambari/commit/be01dee8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ambari/tree/be01dee8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ambari/diff/be01dee8

Branch: refs/heads/trunk
Commit: be01dee8049ec36edcd486388f33cac8b6d1fb11
Parents: 4f6bacd
Author: Jaimin Jetly <jaimin@hortonworks.com>
Authored: Thu Nov 7 14:49:53 2013 -0800
Committer: Jaimin Jetly <jaimin@hortonworks.com>
Committed: Thu Nov 7 14:50:08 2013 -0800

----------------------------------------------------------------------
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../MAPREDUCE/configuration/mapred-site.xml     |  37 +-
 .../WEBHCAT/configuration/webhcat-site.xml      |   2 +-
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../MAPREDUCE/configuration/mapred-site.xml     |  26 +-
 .../WEBHCAT/configuration/webhcat-site.xml      |   2 +-
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../WEBHCAT/configuration/webhcat-site.xml      |   2 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../MAPREDUCE/configuration/mapred-site.xml     | 541 ++++++++++---------
 .../WEBHCAT/configuration/webhcat-site.xml      |  20 +-
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../MAPREDUCE/configuration/mapred-site.xml     | 540 +++++++++---------
 .../WEBHCAT/configuration/webhcat-site.xml      |  20 +-
 .../services/HBASE/configuration/hbase-site.xml |   2 +-
 .../services/HDFS/configuration/core-site.xml   | 162 +++---
 .../services/HDFS/configuration/hdfs-site.xml   |  10 +-
 .../services/HIVE/configuration/hive-site.xml   |   2 +-
 .../WEBHCAT/configuration/webhcat-site.xml      |  23 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 ambari-web/app/data/HDP2/config_mapping.js      | 115 ----
 ambari-web/app/data/HDP2/global_properties.js   |  13 -
 ambari-web/app/data/config_mapping.js           | 103 ----
 ambari-web/app/data/global_properties.js        |  50 --
 ambari-web/app/data/site_properties.js          |  25 +-
 ambari-web/app/models/service_config.js         | 102 +++-
 42 files changed, 917 insertions(+), 972 deletions(-)
----------------------------------------------------------------------
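The recurring pattern in the stack-definition hunks that follow: cluster-variant properties whose values contain hostnames (or host-dependent file paths) no longer ship as empty <value></value> placeholders. They now carry concrete single-node defaults (localhost host:port pairs, /etc/hadoop/conf/* include and exclude files) plus descriptions where those were missing, which lets the web UI surface and edit them. As a representative example, abridged from the first hbase-site.xml hunk below (the description is shortened here; the localhost default is only the stack's placeholder value):

  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>localhost</value>
    <description>Comma separated list of servers in the ZooKeeper Quorum.</description>
  </property>

On the ambari-web side, the summary above shows config_mapping.js and global_properties.js shrinking while service_config.js grows; presumably the hard-coded client-side mappings for these host-valued properties are being replaced by handling that reads them directly from the stack configuration files.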


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
index e002d6b..1ce5ccb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
index 8cf73f5..8c43295 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
index 989b92d..1a0b933 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -309,7 +309,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
index 1e17d7f..9d0b168 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
index fffcdb2..9ef7eaa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
@@ -82,19 +82,20 @@
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
@@ -324,14 +325,24 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
 
 <property>
   <name>mapred.hosts.exclude</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.exclude</value>
+  <description>
+    Names a file that contains the list of hosts that
+    should be excluded by the jobtracker.  If the value is empty, no
+    hosts are excluded.
+  </description>
 </property>
 
 <property>
@@ -380,10 +391,10 @@
 
 <property>
   <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
+  <value>/etc/hadoop/conf/health_check</value>
   <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
+    in a file system to be available after it drops of the memory queue and
+    between jobtracker restarts.
   </description>
 </property>
 
@@ -518,7 +529,7 @@ process</description>
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
index 31d0113..16d8691 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
index e002d6b..1ce5ccb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
index 8cf73f5..8c43295 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
index 989b92d..1a0b933 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -309,7 +309,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
index 1e17d7f..9d0b168 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
index fffcdb2..567c1e1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
@@ -82,16 +82,16 @@
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>JobTracker host and http port address</description>
     <final>true</final>
   </property>
 
@@ -326,12 +326,22 @@
 
 <property>
   <name>mapred.hosts</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.include</value>
+  <description>
+    Names a file that contains the list of nodes that may
+    connect to the jobtracker.  If the value is empty, all hosts are
+    permitted.
+  </description>
 </property>
 
 <property>
   <name>mapred.hosts.exclude</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.exclude</value>
+  <description>
+    Names a file that contains the list of hosts that
+    should be excluded by the jobtracker.  If the value is empty, no
+    hosts are excluded.
+  </description>
 </property>
 
 <property>
@@ -380,7 +390,7 @@
 
 <property>
   <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
+  <value>/etc/hadoop/conf/health_check</value>
   <description>The directory where the job status information is persisted
    in a file system to be available after it drops of the memory queue and
    between jobtracker restarts.
@@ -518,7 +528,7 @@ process</description>
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
index 31d0113..16d8691 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
index 375c7af..b05338d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
@@ -309,7 +309,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
index 7848da3..e244fc7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
@@ -50,7 +50,7 @@
   <property>
     <name>fs.defaultFS</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
index df45e1b..bd3b12a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
@@ -70,7 +70,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -80,7 +80,7 @@
 <!--
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -198,7 +198,7 @@
 
   <property>
     <name>dfs.namenode.http-address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -319,7 +319,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -376,7 +376,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.namenode.https-address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
index eb57179..7fb8969 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
index 95a139f..39b901e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
index 2c16e94..05e23a9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
@@ -24,6 +24,12 @@
   <!-- ResourceManager -->
 
   <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
+  <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
index 9d44de8..ead0c52 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
       For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
       By default this is set to localhost for local and pseudo-distributed modes

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
index afa7338..731d984 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
index 5cb7e4a..15666e0 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
@@ -309,7 +309,7 @@
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
 
   </property>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
index 5a42279..1337fa4 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
index df6ca71..743ca6a 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
@@ -22,12 +22,14 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
+    <value>200</value>
+    <description>
+      The total amount of Map-side buffer memory to use while sorting files
+    </description>
   </property>
 
   <property>
@@ -38,8 +40,8 @@
 
   <property>
     <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection</description>
   </property>
 
   <property>
@@ -48,62 +50,63 @@
     <description>No description</description>
   </property>
 
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
 
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended-default is 5.000 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended-default is 5.000 seconds - a value of
+      5000 here.  In this case, we are using it solely to blast tasks before
+      killing them, and killing them very quickly (1/4 second) to guarantee
+      that we do not leave VMs around for later jobs.
+    </description>
+  </property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
     </description>
   </property>
 
   <property>
     <name>mapred.system.dir</name>
     <value>/mapred/system</value>
-    <description>No description</description>
+    <description>Path on the HDFS where where the MapReduce framework stores system files</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
-    <value></value>
+    <value>/hadoop/mapred</value>
     <description>No description</description>
     <final>true</final>
   </property>
 
   <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
   </property>
 
   <property>
@@ -114,13 +117,13 @@
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
+    <value>4</value>
     <description>No description</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
+    <value>2</value>
     <description>No description</description>
   </property>
 
@@ -133,14 +136,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
@@ -152,29 +155,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
+      for the in-memory merge process. When we accumulate threshold number of files
+      we initiate the in-memory merge and spill to disk. A value of 0 or less than
+      0 indicates we want to DON'T have any threshold and instead depend only on
+      the ramfs's memory consumption to trigger the merge.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
+      size to storing map outputs during the shuffle.
+    </description>
   </property>
 
   <property>
@@ -185,13 +188,13 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
 
 
   <property>
@@ -201,14 +204,14 @@
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
   </property>
 
   <property>
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
+      "false" to start afresh
     </description>
   </property>
 
@@ -216,20 +219,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
   </property>
 
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 Gb.)  If the estimated input size of the reduce is greater than
+      this value, job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -243,9 +246,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
   </property>
 
   <property>
@@ -257,9 +260,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
   </property>
 
   <property>
@@ -277,158 +280,192 @@
   <property>
     <name>mapred.cluster.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      The virtual memory size of a single Map slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      The virtual memory size of a single Reduce slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.job.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      Virtual memory for single Map task
+    </description>
   </property>
 
   <property>
     <name>mapred.job.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      Virtual memory for single Reduce task
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.map.memory.mb</name>
     <value>6144</value>
+    <description>
+      Upper limit on virtual memory size for a single Map task of any MapReduce job
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.reduce.memory.mb</name>
     <value>4096</value>
+    <description>
+      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>file:////mapred/jobstatus</value>
+    <description>
+      Directory path to view job status
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops of the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops of the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per jvm. If set to -1, there is no limit
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
       JT user name key.
- </description>
-</property>
+    </description>
+  </property>
 
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      tt user name key. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
 
 
   <property>
@@ -438,61 +475,61 @@
   </property>
 
 
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
 
-</property>
+  </property>
 
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
     <description>The filename of the keytab for the task tracker</description>
- </property>
+  </property>
 
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
 
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
 
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
- </property>
+  </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialize.
+    </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-process</description>
+      process</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>
@@ -500,38 +537,38 @@ process</description>
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-  <value></value>
+    <value></value>
     <description>Job history user name key. (must map to same user as JT
-user)</description>
+      user)</description>
   </property>
 
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description>Comma-separated list of queues configured for this JobTracker.</description>
+  </property>
 
 </configuration>
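
The hunks above give the cluster-variant properties concrete placeholders (for example localhost:51111 for mapreduce.history.server.http.address) instead of empty values. As a minimal sketch of how such values are consumed, assuming a Hadoop client classpath and a locally readable copy of the stack's mapred-site.xml (the /etc/hadoop/conf path below is illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class MapredSiteCheck {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);    // skip Hadoop's built-in defaults
            conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"));

            // An empty <value></value> generally loads as an empty string rather than
            // null, so a blank cluster-variant property silently propagates "" to every
            // consumer; a placeholder such as localhost:51111 is at least visible.
            System.out.println("history server = "
                    + conf.get("mapreduce.history.server.http.address", "<unset>"));
            System.out.println("queues         = " + conf.get("mapred.queue.names", "default"));
        }
    }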

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
index 31d0113..cc30c7a 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
@@ -25,7 +25,7 @@ limitations under the License.
 
   <property>
     <name>templeton.port</name>
-      <value>50111</value>
+    <value>50111</value>
     <description>The HTTP port for the main server.</description>
   </property>
 
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 
@@ -104,18 +104,18 @@ limitations under the License.
   </property>
 
   <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
+      Enable the override path in templeton.override.jars
+    </description>
+  </property>
 
- <property>
+  <property>
     <name>templeton.streaming.jar</name>
     <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
+  </property>
 
   <property>
     <name>templeton.exec.timeout</name>
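
templeton.zookeeper.hosts above is described as comma-separated host:port pairs and now defaults to localhost:2181 instead of an empty string. A small sketch of how a client might split that value, assuming a Hadoop client classpath (the webhcat-site.xml path and class name are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class TempletonZkHosts {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hcatalog/conf/webhcat-site.xml"));  // illustrative path
            // getTrimmedStrings splits the value on commas and trims whitespace.
            for (String hostPort : conf.getTrimmedStrings("templeton.zookeeper.hosts")) {
                String[] parts = hostPort.split(":", 2);
                System.out.println("zookeeper host=" + parts[0] + " port=" + parts[1]);
            }
        }
    }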

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
index 9d44de8..ead0c52 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
       For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
       By default this is set to localhost for local and pseudo-distributed modes
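
hbase.zookeeper.quorum above carries only the host list; HBase clients pair it with the client port (hbase.zookeeper.property.clientPort, which is not part of this hunk and defaults to 2181) to form the ZooKeeper connect string. A rough sketch of that composition, using an illustrative local path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class HBaseQuorum {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));  // illustrative path
            String quorum = conf.get("hbase.zookeeper.quorum", "localhost");
            String port = conf.get("hbase.zookeeper.property.clientPort", "2181");
            StringBuilder connect = new StringBuilder();
            for (String host : quorum.split(",")) {
                if (connect.length() > 0) connect.append(',');
                connect.append(host.trim()).append(':').append(port);
            }
            System.out.println("ZooKeeper connect string: " + connect);
        }
    }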

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
index afa7338..731d984 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
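
fs.default.name is the URI that FileSystem.get() resolves when a client asks for the default filesystem, so replacing the empty value with hdfs://localhost:8020 makes the single-node case work out of the box. A minimal sketch, assuming the hadoop-hdfs client jars are on the classpath and core-site.xml is readable at the illustrative path below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultFsCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));  // illustrative path
            // With the new default this should report hdfs://localhost:8020.
            FileSystem fs = FileSystem.get(conf);
            System.out.println("default filesystem: " + fs.getUri());
        }
    }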

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
index 5cb7e4a..15666e0 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
@@ -309,7 +309,7 @@
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
 
   </property>
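
dfs.hosts and dfs.hosts.exclude now point at include/exclude files rather than being left blank; those files are plain lists of hostnames, one per line, and may legitimately be empty. A short sketch that resolves the exclude file from the config and prints its entries (the paths are illustrative):

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ExcludedHosts {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));  // illustrative path
            String excludeFile = conf.get("dfs.hosts.exclude", "");
            if (excludeFile.isEmpty()) {
                System.out.println("no exclude file configured; all hosts may connect");
                return;
            }
            for (String host : Files.readAllLines(Paths.get(excludeFile))) {
                if (!host.trim().isEmpty()) {
                    System.out.println("excluded from the NameNode: " + host.trim());
                }
            }
        }
    }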

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
index 5a42279..1337fa4 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

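hive.metastore.uris holds the URI (or comma-separated list of URIs) that clients use to reach the metastore, so even the localhost placeholder has to parse cleanly. A tiny sketch of pulling host and port out of the new default (conf path and class name are illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class MetastoreUri {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hive/conf/hive-site.xml"));  // illustrative path
            // The value may be a comma-separated list; handle each entry in turn.
            for (String s : conf.getTrimmedStrings("hive.metastore.uris")) {
                URI uri = URI.create(s);  // e.g. thrift://localhost:9083
                System.out.println("metastore host=" + uri.getHost() + " port=" + uri.getPort());
            }
        }
    }
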
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/be01dee8/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
index df6ca71..56eeff5 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
@@ -22,12 +22,14 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
+    <value>200</value>
+    <description>
+      The total amount of Map-side buffer memory, in megabytes, to use while sorting files
+    </description>
   </property>
 
   <property>
@@ -38,8 +40,8 @@
 
   <property>
     <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection</description>
   </property>
 
   <property>
@@ -48,62 +50,62 @@
     <description>No description</description>
   </property>
 
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
 
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended-default is 5.000 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended default is 5 seconds (a value of
+      5000 here).  In this case, we are using it solely to blast tasks before
+      killing them, and killing them very quickly (1/4 second) to guarantee
+      that we do not leave VMs around for later jobs.
+    </description>
+  </property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
     </description>
   </property>
 
   <property>
     <name>mapred.system.dir</name>
     <value>/mapred/system</value>
-    <description>No description</description>
+    <description>Path on HDFS where the MapReduce framework stores system files</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>JobTracker host and http port address</description>
     <final>true</final>
   </property>
 
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
-    <value></value>
+    <value>/hadoop/mapred</value>
     <description>No description</description>
     <final>true</final>
   </property>
 
   <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
   </property>
 
   <property>
@@ -114,13 +116,13 @@
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
+    <value>4</value>
     <description>No description</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
+    <value>2</value>
     <description>No description</description>
   </property>
 
@@ -133,14 +135,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
@@ -152,29 +154,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
+      for the in-memory merge process. When we accumulate the threshold number of
+      files we initiate the in-memory merge and spill to disk. A value of 0 or
+      less indicates that no threshold is used and the merge is triggered solely
+      by the ramfs's memory consumption.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
+      size to storing map outputs during the shuffle.
+    </description>
   </property>
 
   <property>
@@ -185,13 +187,13 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
 
 
   <property>
@@ -201,14 +203,14 @@
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
   </property>
 
   <property>
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
+      "false" to start afresh
     </description>
   </property>
 
@@ -216,20 +218,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
   </property>
 
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 Gb.)  If the estimated input size of the reduce is greater than
+      this value, the job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -243,9 +245,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
   </property>
 
   <property>
@@ -257,9 +259,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
   </property>
 
   <property>
@@ -277,158 +279,192 @@
   <property>
     <name>mapred.cluster.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      The virtual memory size of a single Map slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      The virtual memory size of a single Reduce slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.job.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      Virtual memory for single Map task
+    </description>
   </property>
 
   <property>
     <name>mapred.job.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      Virtual memory for single Reduce task
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.map.memory.mb</name>
     <value>6144</value>
+    <description>
+      Upper limit on virtual memory size for a single Map task of any MapReduce job
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.reduce.memory.mb</name>
     <value>4096</value>
+    <description>
+      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>file:////mapred/jobstatus</value>
+    <description>
+      Directory path to view job status
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops off the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops off the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per jvm. If set to -1, there is no limit
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
       JT user name key.
- </description>
-</property>
+    </description>
+  </property>
 
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
 
 
   <property>
@@ -438,61 +474,61 @@
   </property>
 
 
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
 
-</property>
+  </property>
 
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
     <description>The filename of the keytab for the task tracker</description>
- </property>
+  </property>
 
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
 
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The path prefix under which staging directories are placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
 
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
- </property>
+  </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialization.
+    </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-process</description>
+      process</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>
@@ -500,38 +536,38 @@ process</description>
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-  <value></value>
+    <value></value>
     <description>Job history user name key. (must map to same user as JT
-user)</description>
+      user)</description>
   </property>
 
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description>Comma-separated list of queues configured for this JobTracker.</description>
+  </property>
 
 </configuration>
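
The blacklist settings above fit together numerically: a 180-minute sliding window tracked in 15-minute buckets gives 180 / 15 = 12 fault buckets, and mapred.max.tracker.blacklists (16, earlier in this file) is the per-window fault count at which a TaskTracker is graylisted. A throwaway sketch of that arithmetic, read from the same illustrative conf path as the earlier examples:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class BlacklistWindow {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"));  // illustrative path
            int windowMinutes = conf.getInt("mapred.jobtracker.blacklist.fault-timeout-window", 180);
            int bucketMinutes = conf.getInt("mapred.jobtracker.blacklist.fault-bucket-width", 15);
            int maxFaults     = conf.getInt("mapred.max.tracker.blacklists", 16);
            System.out.println("fault buckets per window: " + (windowMinutes / bucketMinutes));  // 12
            System.out.println("faults before graylisting: " + maxFaults);
        }
    }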

