ambari-commits mailing list archives

From aonis...@apache.org
Subject [10/11] AMBARI-6488. Move global to env in stack definitions (aonishuk)
Date Wed, 16 Jul 2014 12:59:02 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..909ba31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>HDFS User.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if this env var is already set it can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# this is different for HDP1 #
+# Path to jsvc required by secure HDP 2.0 datanode
+# export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA -Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
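
For context on the "content" property above: the {{...}} markers in the embedded hadoop-env.sh are Jinja2 placeholders that the Ambari agent substitutes from the stack's params before writing the file out. A minimal stand-alone sketch of that substitution, using plain Jinja2 (the rendered values are assumptions for illustration, not taken from this patch):

from jinja2 import Template

env_sh = (
    'export JAVA_HOME={{java_home}}\n'
    'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n'
    'export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n'
)

# $USER is not a Jinja2 construct, so it passes through untouched
# and is resolved later by the shell.
print(Template(env_sh).render(
    java_home="/usr/jdk64/jdk1.6.0_31",   # assumed JDK location
    hadoop_heapsize="1024",
    hdfs_log_dir_prefix="/var/log/hadoop",
))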

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
index 5c28cf3..cd780b4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
@@ -126,8 +126,8 @@
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
index fb96f93..ce1e650 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
@@ -31,15 +31,15 @@ else:
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
@@ -83,20 +83,19 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group = "users"
 
 #hadoop params
@@ -104,7 +103,7 @@ hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
 hadoop_bin = "/usr/lib/hadoop/bin"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
 dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
@@ -129,9 +128,9 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
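
Two things change in this params.py: every lookup moves from the flat "global" config type to a service-scoped "*-env" type, and the get_kinit_path() call stops seeding the search list with default("kinit_path_local", None), which could only ever contribute a None entry. A rough approximation of what the helper does with that list, assuming it simply probes each directory for a kinit binary (a sketch, not resource_management's actual code):

import os

def get_kinit_path(path_list):
    for directory in path_list:
        if not directory:
            continue  # a None from the removed default(...) would land here
        candidate = os.path.join(directory, "kinit")
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return "kinit"  # assumed fallback: rely on $PATH

print(get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]))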

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
index 4097373..0027a4c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
@@ -21,8 +21,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
 datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
 namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
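
The format() calls above are not str.format: resource_management's helper resolves {names} from the caller's scope, which is why no arguments are passed. A simplified stand-in that shows only that lookup idea (the real helper is more involved):

import inspect

def format(template):
    caller = inspect.currentframe().f_back
    names = dict(caller.f_globals)
    names.update(caller.f_locals)
    return template.format(**names)

hadoop_pid_dir_prefix = "/var/run/hadoop"
hdfs_user = "hdfs"
print(format("{hadoop_pid_dir_prefix}/{hdfs_user}"))  # /var/run/hadoop/hdfs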

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
deleted file mode 100644
index 90e7627..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hive_database_type</name>
-    <value>mysql</value>
-    <description>Default HIVE DB type.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value>New MySQL Database</value>
-    <description>
-      Property that determines whether the HIVE DB is managed by Ambari.
-    </description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>  
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml
new file mode 100644
index 0000000..738818c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>  
+  <property>
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+  </property>  
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>      
+  <property>
+    <name>hive_conf_dir</name>
+    <value>/etc/hive/conf</value>
+    <description>Hive Conf Dir.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+  </property>
+  <property>
+    <name>hive_aux_jars_path</name>
+    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
+    <description>Hive auxiliary jar path.</description>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <description>Hive User.</description>
+  </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+  </property>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hive-env.sh content</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the jvm started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
index 7c929b3..97bcc0a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
@@ -128,7 +128,7 @@
 
       <configuration-dependencies>
         <config-type>hive-site</config-type>
-        <config-type>global</config-type>
+        <config-type>hive-env</config-type>
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
       </configuration-dependencies>
@@ -165,8 +165,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hive-site</config-type>
+        <config-type>hive-env</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
index 0e44e77..5ee6000 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
@@ -94,7 +94,7 @@ def hive(name=None):
   File(format("{hive_config_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+       content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir)
   )
 
   crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
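
The hunk above swaps a packaged file template for an inline one: Template('hive-env.sh.j2') rendered a .j2 file shipped with the stack scripts, while InlineTemplate(params.hive_env_sh_template) renders the string stored in the new hive-env "content" property, making the hive-env.sh body editable configuration rather than packaged code. A rough plain-Jinja2 analogue of the inline path (template string and values are stand-ins):

from jinja2 import Template

hive_env_sh_template = (
    "export HIVE_CONF_DIR={{conf_dir}}\n"
    "export METASTORE_PORT={{hive_metastore_port}}\n"
)

print(Template(hive_env_sh_template).render(
    conf_dir="/etc/hive/conf",
    hive_metastore_port=9083,  # assumed port for illustration
))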

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
index 7453ed4..be2b77e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
@@ -31,7 +31,7 @@ hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.opti
 hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
 
 #users
-hive_user = config['configurations']['global']['hive_user']
+hive_user = config['configurations']['hive-env']['hive_user']
 hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
@@ -56,26 +56,26 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 smoke_test_sql = "/tmp/hiveserver2.sql"
 smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 #hive_env
 hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 
 #hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
 
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh'
@@ -88,8 +88,8 @@ hive_metastore_pid = status_params.hive_metastore_pid
 java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']
 artifact_dir = "/tmp/HDP-artifacts/"
 
 target = format("{hive_lib}/{jdbc_jar_name}")
@@ -100,14 +100,15 @@ driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
 start_hiveserver2_path = "/tmp/start_hiveserver2_script"
 start_metastore_path = "/tmp/start_metastore_script"
 
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+hive_aux_jars_path = config['configurations']['hive-env']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
 java64_home = config['hostLevelParams']['java_home']
+hive_env_sh_template = config['configurations']['hive-env']['content']
 
 ##### MYSQL
 
-db_name = config['configurations']['global']['hive_database_name']
+db_name = config['configurations']['hive-env']['hive_database_name']
 mysql_user = "mysql"
 mysql_group = 'mysql'
 mysql_host = config['clusterHostInfo']['hive_mysql_host']
@@ -122,11 +123,11 @@ hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
 
 hcat_dbroot = hcat_lib
 
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']   #hcat_log_dir
 
 hadoop_conf_dir = '/etc/hadoop/conf'
 
@@ -151,9 +152,9 @@ hive_hdfs_user_mode = 0700
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
index 8fed3d4..f371bee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
@@ -22,12 +22,12 @@ from resource_management import *
 
 config = Script.get_config()
 
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
 hive_pid = 'hive-server.pid'
 
 hive_metastore_pid = 'hive.pid'
 
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
 
 if System.get_instance().os_family == "suse":
   daemon_name = 'mysql'

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index 5539dc3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm stared by hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra ibraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH}
-else
-  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
-fi
-export METASTORE_PORT={{hive_metastore_port}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 41bb735..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,150 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>Mem New Size.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>Max New size.</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration.</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>/mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml
new file mode 100644
index 0000000..00f3825
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>mapred_local_dir</name>
+    <value>/hadoop/mapred</value>
+    <description>MapRed Local Directories.</description>
+  </property>
+  <property>
+    <name>mapred_system_dir</name>
+    <value>/mapred/system</value>
+    <description>MapRed System Directories.</description>
+  </property>
+  <property>
+    <name>scheduler_name</name>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+    <description>MapRed Capacity Scheduler.</description>
+  </property>
+  <property>
+    <name>jtnode_opt_newsize</name>
+    <value>200</value>
+    <description>JobTracker new generation size.</description>
+  </property>
+  <property>
+    <name>jtnode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>JobTracker maximum new generation size.</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>jtnode_heapsize</name>
+    <value>1024</value>
+    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
+  </property>
+  <property>
+    <name>mapred_map_tasks_max</name>
+    <value>4</value>
+    <description>Number of slots on a TaskTracker that simultaneously running Map tasks can occupy</description>
+  </property>
+  <property>
+    <name>mapred_red_tasks_max</name>
+    <value>2</value>
+    <description>Number of slots on a TaskTracker that simultaneously running Reduce tasks can occupy</description>
+  </property>
+  <property>
+    <name>mapred_cluster_map_mem_mb</name>
+    <value>-1</value>
+    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
+  </property>
+  <property>
+    <name>mapred_cluster_red_mem_mb</name>
+    <value>-1</value>
+    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
+  </property>
+  <property>
+    <name>mapred_job_map_mem_mb</name>
+    <value>-1</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+  <property>
+    <name>mapred_child_java_opts_sz</name>
+    <value>768</value>
+    <description>Java options for the TaskTracker child processes.</description>
+  </property>
+  <property>
+    <name>io_sort_mb</name>
+    <value>200</value>
+    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
+  </property>
+  <property>
+    <name>io_sort_spill_percent</name>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
+  </property>
+  <property>
+    <name>mapreduce_userlog_retainhours</name>
+    <value>24</value>
+    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
+  </property>
+  <property>
+    <name>maxtasks_per_job</name>
+    <value>-1</value>
+    <description>Maximum number of tasks for a single Job</description>
+  </property>
+  <property>
+    <name>lzo_enabled</name>
+    <value>true</value>
+    <description>LZO compression enabled</description>
+  </property>
+  <property>
+    <name>snappy_enabled</name>
+    <value>true</value>
+    <description>Snappy compression enabled</description>
+  </property>
+  <property>
+    <name>rca_enabled</name>
+    <value>true</value>
+    <description>Enable Job Diagnostics.</description>
+  </property>
+  <property>
+    <name>mapred_hosts_exclude</name>
+    <value></value>
+    <description>Exclude entered hosts</description>
+  </property>
+  <property>
+    <name>mapred_hosts_include</name>
+    <value></value>
+    <description>Include entered hosts</description>
+  </property>
+  <property>
+    <name>mapred_jobstatus_dir</name>
+    <value>/mapred/jobstatus</value>
+    <description>Job Status directory</description>
+  </property>
+  <property>
+    <name>task_controller</name>
+    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
+    <description>Task Controller.</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>MapReduce User.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
index 82de5dd..d8cb053 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
@@ -103,8 +103,8 @@
       <configuration-dependencies>
         <config-type>capacity-scheduler</config-type>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
         <config-type>mapred-queue-acls</config-type>
         <config-type>mapreduce-log4j</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
index 432f803..a7e79d5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
@@ -36,18 +36,18 @@ tasktracker_pid_file = status_params.tasktracker_pid_file
 
 hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
 hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['global']['user_group']
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
+user_group = config['configurations']['hadoop-env']['user_group']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+mapred_log_dir_prefix = hdfs_log_dir_prefix
 mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
 hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #exclude file
 mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
@@ -60,9 +60,9 @@ mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapred.
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
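
One hunk above deserves a note: mapred_log_dir_prefix previously came from default("mapred_log_dir_prefix", hdfs_log_dir_prefix). Since the helper resolves a /-separated path against the command JSON and "mapred_log_dir_prefix" names no config-type, that lookup presumably always missed and returned the fallback; the patch just hard-wires it. A simplified stand-in for the helper under that assumption:

def default(path, fallback, config=None):
    node = config if config is not None else {}
    for segment in path.strip("/").split("/"):
        if not isinstance(node, dict) or segment not in node:
            return fallback
        node = node[segment]
    return node

hdfs_log_dir_prefix = "/var/log/hadoop"
# "mapred_log_dir_prefix" matches nothing, so the fallback is returned
print(default("mapred_log_dir_prefix", hdfs_log_dir_prefix))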

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
index 99c4dcd..11986b0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
@@ -23,8 +23,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-mapred_user = config['configurations']['global']['mapred_user']
-pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
 
 jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index f36020d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <type>PASSWORD</type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
new file mode 100644
index 0000000..54c742c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>nagios_user</name>
+    <value>nagios</value>
+    <description>Nagios Username.</description>
+  </property>
+  <property>
+    <name>nagios_group</name>
+    <value>nagios</value>
+    <description>Nagios Group.</description>
+  </property>
+  <property>
+    <name>nagios_web_login</name>
+    <value>nagiosadmin</value>
+    <description>Nagios web user.</description>
+  </property>
+  <property require-input = "true">
+    <name>nagios_web_password</name>
+    <value></value>
+    <type>PASSWORD</type>
+    <description>Nagios Admin Password.</description>
+  </property>
+  <property require-input = "true">
+    <name>nagios_contact</name>
+    <value></value>
+    <description>Hadoop Admin Email.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
index 6357787..79fecbc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
@@ -115,7 +115,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>nagios-env</config-type>
       </configuration-dependencies>      
       <monitoringService>true</monitoringService>
     </service>
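
The dependency switch above matters because Ambari consults configuration-dependencies to decide which config types a service reacts to; after this patch Nagios tracks nagios-env instead of the retired global type. A small illustration of reading such a list, using a hypothetical inline snippet:

import xml.etree.ElementTree as ET

# Hypothetical fragment mirroring the metainfo.xml shape above.
snippet = """
<service>
  <configuration-dependencies>
    <config-type>nagios-env</config-type>
  </configuration-dependencies>
</service>
"""
root = ET.fromstring(snippet)
print([ct.text for ct in root.iter("config-type")])  # ['nagios-env']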

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
index b172dce..282beb5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
@@ -45,7 +45,7 @@ nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
 nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
 nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
 eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
+nagios_principal_name = default("/configurations/hadoop-env/nagios_principal_name", "nagios")
 hadoop_ssl_enabled = False
 
 namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.default.name'])
@@ -74,7 +74,7 @@ mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
 
 # this is different for HDP2
 nn_metrics_property = "FSNamesystemMetrics"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
+clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
 
 
 java64_home = config['hostLevelParams']['java_home']
@@ -82,8 +82,8 @@ check_cpu_on = is_jdk_greater_6(java64_home)
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+nagios_keytab_path = default("/configurations/hadoop-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 ganglia_port = "8651"
 ganglia_collector_slaves_port = "8660"
@@ -105,12 +105,12 @@ else:
   htpasswd_cmd = "htpasswd"
   nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
   
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
+nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
+user_group = config['configurations']['hadoop-env']['user_group']
+nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
 namenode_host = default("/clusterHostInfo/namenode_host", None)
 _snamenode_host = default("/clusterHostInfo/snamenode_host", None)
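
Two of the hunks above also correct default() lookups that previously passed a bare key; since default() resolves a /-separated path from the root of the configuration tree, a bare key like "nagios_principal_name" would likely never match and the fallback was always taken. A minimal sketch of a path-walking lookup, assuming a plain nested dict in place of Script.get_config():

def default(path, fallback, config):
    # Walk a '/'-separated path through nested dicts, returning the
    # fallback as soon as a segment is missing. A sketch only: the real
    # helper reads Script.get_config() instead of taking config directly.
    node = config
    for segment in path.strip("/").split("/"):
        if not isinstance(node, dict) or segment not in node:
            return fallback
        node = node[segment]
    return node

config = {"configurations": {"hadoop-env": {
    "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"}}}
print(default("/configurations/hadoop-env/nagios_keytab_path", "kt", config))
print(default("/configurations/hadoop-env/nagios_principal_name", "nagios", config))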

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml
deleted file mode 100644
index 1410bac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value>New Derby Database</value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>  
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml
new file mode 100644
index 0000000..038f528
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>oozie_user</name>
+    <value>oozie</value>
+    <description>Oozie User.</description>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <value>New Derby Database</value>
+    <description>Oozie Server Database.</description>
+  </property>
+  <property>
+    <name>oozie_derby_database</name>
+    <value>Derby</value>
+    <description>Oozie Derby Database.</description>
+  </property>
+  <property>
+    <name>oozie_data_dir</name>
+    <value>/hadoop/oozie/data</value>
+    <description>Data directory in which the Oozie DB exists</description>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <value>/var/log/oozie</value>
+    <description>Directory for oozie logs</description>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <value>/var/run/oozie</value>
+    <description>Directory in which the pid files for oozie reside.</description>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <value>11001</value>
+    <description>The admin port on which the Oozie server runs.</description>
+  </property>  
+
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>oozie-env.sh content</description>
+    <value>
+#!/bin/bash
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+export JRE_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
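
The {{...}} markers inside the content value above are not shell syntax; they are template placeholders that the agent fills in when it materializes oozie-env.sh. A toy rendering, assuming simple {{name}} substitution (the real InlineTemplate is Jinja2-based):

import re

def render(template, params):
    # Substitute {{name}} placeholders from params; resource_management's
    # InlineTemplate uses Jinja2, so this is only an approximation.
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), template)

print(render("export OOZIE_LOG={{oozie_log_dir}}",
             {"oozie_log_dir": "/var/log/oozie"}))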

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
index 69a127a..a2c984e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
@@ -102,8 +102,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
         <config-type>oozie-log4j</config-type>
       </configuration-dependencies>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
index a073064..5fd2c96 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
@@ -44,9 +44,10 @@ def oozie(is_server=False
     group = params.user_group
   )
   
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
+  File(format("{conf_dir}/oozie-env.sh"),
+       owner=params.oozie_user,
+       content=InlineTemplate(params.oozie_env_sh_template)
+  )  
 
   if (params.log4j_props != None):
     File(format("{params.conf_dir}/oozie-log4j.properties"),
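
The switch from TemplateConfig to File(..., content=InlineTemplate(...)) above means oozie-env.sh is no longer rendered from a packaged .j2 template but from the editable content property of oozie-env.xml. Roughly what the File resource boils down to, sketched with plain file I/O and minus the resource's idempotence and mode handling:

import os
import pwd

def write_owned_file(path, content, owner):
    # Write the rendered text and hand ownership to the service user;
    # a sketch of File(path, owner=..., content=...), not the actual
    # resource_management resource.
    with open(path, "w") as f:
        f.write(content)
    os.chown(path, pwd.getpwnam(owner).pw_uid, -1)

# Example (requires root and the oozie user to exist):
# write_owned_file("/etc/oozie/conf/oozie-env.sh", rendered_text, "oozie")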

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
index ae4da20..7072ab2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
@@ -25,11 +25,11 @@ import status_params
 config = Script.get_config()
 
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -42,15 +42,15 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 # for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
 ext_js_path = "/usr/share/HDP-oozie/ext.zip"
 oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
+lzo_enabled = config['configurations']['mapred-env']['lzo_enabled']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
+smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+oozie_keytab = config['configurations']['hadoop-env']['oozie_keytab']
 
 oracle_driver_jar_name = "ojdbc6.jar"
 java_share_dir = "/usr/share/java"
@@ -59,14 +59,15 @@ java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
 oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
 oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['global']['oozie_admin_port']
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
 oozie_lib_dir = "/var/lib/oozie/"
 oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
 
 jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 if jdbc_driver_name == "com.mysql.jdbc.Driver":
   jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
@@ -101,9 +102,9 @@ oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
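
Note how the retired global type is split by owner in this hunk: the Oozie user and directories move to oozie-env, shared items such as smokeuser, user_group and the HDFS keytabs to hadoop-env, and lzo_enabled to mapred-env. Shaped as data, with illustrative values standing in for Script.get_config():

config = {"configurations": {
    "oozie-env":  {"oozie_user": "oozie", "oozie_log_dir": "/var/log/oozie"},
    "hadoop-env": {"smokeuser": "ambari-qa", "user_group": "hadoop"},
    "mapred-env": {"lzo_enabled": "false"},
}}

# Each lookup now names the config type that owns the key:
oozie_user = config["configurations"]["oozie-env"]["oozie_user"]
user_group = config["configurations"]["hadoop-env"]["user_group"]
lzo_enabled = config["configurations"]["mapred-env"]["lzo_enabled"]
print(oozie_user, user_group, lzo_enabled)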

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
index c44fcf4..a665449 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
 pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 502ea61..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,88 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-export JRE_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
new file mode 100644
index 0000000..d0de1ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- pig-env.sh -->
+  <property>
+    <name>content</name>
+    <description>pig-env.sh content</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+    </value>
+  </property>
+  
+</configuration>
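
The pig-env.sh content above mixes two substitution layers: {{java64_home}} and {{hadoop_home}} are resolved by the template engine at deploy time, while ${HADOOP_HOME:-...} is left to the shell, so an already exported HADOOP_HOME wins over the templated default. The shell idiom in Python terms, with an illustrative path standing in for {{hadoop_home}}:

import os

# Equivalent of the shell's ${HADOOP_HOME:-fallback}: keep the exported
# value if present and non-empty, else use the templated default.
# "/usr/lib/hadoop" is an illustrative stand-in for {{hadoop_home}}.
hadoop_home = os.environ.get("HADOOP_HOME") or "/usr/lib/hadoop"
print(hadoop_home)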

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
index fb09b2f..7f2ec7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
@@ -52,7 +52,7 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>pig-env</config-type>
         <config-type>pig-log4j</config-type>
       </configuration-dependencies>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
index c44acfa..b8cb82a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
@@ -26,13 +26,14 @@ config = Script.get_config()
 
 pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+user_group = config['configurations']['hadoop-env']['user_group']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
index 8a8cd52..e73a0d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
@@ -30,8 +30,13 @@ def pig():
     group = params.user_group
   )
 
-  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
-
+  File(format("{pig_conf_dir}/pig-env.sh"),
+       owner=params.hdfs_user,
+       content=InlineTemplate(params.pig_env_sh_template)
+  )
+  
+  pig_TemplateConfig( ['pig.properties'])
+  
   if (params.log4j_props != None):
     File(format("{params.pig_conf_dir}/log4j.properties"),
          mode=0644,

