Return-Path: X-Original-To: apmail-ambari-commits-archive@www.apache.org Delivered-To: apmail-ambari-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 2661211642 for ; Wed, 16 Jul 2014 12:58:54 +0000 (UTC) Received: (qmail 75717 invoked by uid 500); 16 Jul 2014 12:58:54 -0000 Delivered-To: apmail-ambari-commits-archive@ambari.apache.org Received: (qmail 75624 invoked by uid 500); 16 Jul 2014 12:58:54 -0000 Mailing-List: contact commits-help@ambari.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: ambari-dev@ambari.apache.org Delivered-To: mailing list commits@ambari.apache.org Received: (qmail 75378 invoked by uid 99); 16 Jul 2014 12:58:53 -0000 Received: from tyr.zones.apache.org (HELO tyr.zones.apache.org) (140.211.11.114) by apache.org (qpsmtpd/0.29) with ESMTP; Wed, 16 Jul 2014 12:58:53 +0000 Received: by tyr.zones.apache.org (Postfix, from userid 65534) id 9EA6998EA51; Wed, 16 Jul 2014 12:58:53 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: aonishuk@apache.org To: commits@ambari.apache.org Date: Wed, 16 Jul 2014 12:59:02 -0000 Message-Id: In-Reply-To: References: X-Mailer: ASF-Git Admin Mailer Subject: [10/11] AMBARI-6488. Move global to env in stack definitions (aonishuk) http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml new file mode 100644 index 0000000..909ba31 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml @@ -0,0 +1,211 @@ + + + + + + + hdfs_log_dir_prefix + /var/log/hadoop + Hadoop Log Dir Prefix + + + hadoop_pid_dir_prefix + /var/run/hadoop + Hadoop PID Dir Prefix + + + hadoop_heapsize + 1024 + Hadoop maximum Java heap size + + + namenode_heapsize + 1024 + NameNode Java heap size + + + namenode_opt_newsize + 200 + NameNode new generation size + + + namenode_opt_maxnewsize + 200 + NameNode maximum new generation size + + + dtnode_heapsize + 1024 + DataNode maximum Java heap size + + + proxyuser_group + users + Proxy user group. + + + + security_enabled + false + Hadoop Security + + + kerberos_domain + EXAMPLE.COM + Kerberos realm. + + + + hdfs_user + hdfs + User and Groups. + + + ignore_groupsusers_create + false + Whether to ignores failures on users and group creation + + + smokeuser + ambari-qa + User executing service checks + + + user_group + hadoop + Proxy user group. + + + + + content + hadoop-env.sh content + +# Set Hadoop-specific environment variables here. + +# The only required environment variable is JAVA_HOME. All others are +# optional. When running a distributed configuration it is best to +# set JAVA_HOME in this file, so that it is correctly defined on +# remote nodes. + +# The java implementation to use. Required. 
+export JAVA_HOME={{java_home}} +export HADOOP_HOME_WARN_SUPPRESS=1 + +# Hadoop Configuration Directory +#TODO: if env var set that can cause problems +export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}} + +# Hadoop home directory +export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop} + +# this is different for HDP1 # +# Path to jsvc required by secure HDP 2.0 datanode +# export JSVC_HOME={{jsvc_path}} + + +# The maximum amount of heap to use, in MB. Default is 1000. +export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" + +export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}" + +# Extra Java runtime options. Empty by default. +export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}" + +# History server logs +export HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER + +# Command specific options appended to HADOOP_OPTS when specified +export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}" +export HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA -Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR ${HADOOP_JOBTRACKER_OPTS}" + +HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}" +HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}" +HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}" + +export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" + +# The following applies to multiple commands (fs, dfs, fsck, distcp etc) +export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" +# On secure datanodes, user to run the datanode as after dropping privileges +export HADOOP_SECURE_DN_USER={{hdfs_user}} + +# Extra ssh options. Empty by default. +export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR" + +# Where log files are stored. $HADOOP_HOME/logs by default. +export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER + +# Where log files are stored in the secure data environment. +export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER + +# File naming remote slave hosts. 
$HADOOP_HOME/conf/slaves by default. +# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves + +# host:path where hadoop code should be rsync'd from. Unset by default. +# export HADOOP_MASTER=master:/home/$USER/src/hadoop + +# Seconds to sleep between slave commands. Unset by default. This +# can be useful in large clusters, where, e.g., slave rsyncs can +# otherwise arrive faster than the master can service them. +# export HADOOP_SLAVE_SLEEP=0.1 + +# The directory where pid files are stored. /tmp by default. +export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER +export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER + +# History server pid +export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER + +YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY" + +# A string representing this instance of hadoop. $USER by default. +export HADOOP_IDENT_STRING=$USER + +# The scheduling priority for daemon processes. See 'man nice'. + +# export HADOOP_NICENESS=10 + +# Use libraries from standard classpath +JAVA_JDBC_LIBS="" +#Add libraries required by mysql connector +for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null` +do + JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile +done +#Add libraries required by oracle connector +for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null` +do + JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile +done +#Add libraries required by nodemanager +MAPREDUCE_LIBS={{mapreduce_libs_path}} +export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} + +# Setting path to hdfs command line +export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} + +#Mostly required for hadoop 2.0 +export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64 + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml index 5c28cf3..cd780b4 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml @@ -126,8 +126,8 @@ core-site - global hdfs-site + hadoop-env hadoop-policy hdfs-log4j http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py index fb96f93..ce1e650 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py @@ -31,15 +31,15 @@ else: #security params _authentication = config['configurations']['core-site']['hadoop.security.authentication'] security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos') -smoke_user_keytab = config['configurations']['global']['smokeuser_keytab'] -hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab'] +smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab'] +hdfs_user_keytab = 
config['configurations']['hadoop-env']['hdfs_user_keytab'] #exclude file hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", []) exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude'] update_exclude_file_only = config['commandParams']['update_exclude_file_only'] -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) #hosts hostname = config["hostname"] rm_host = default("/clusterHostInfo/rm_host", []) @@ -83,20 +83,19 @@ if has_ganglia_server: ganglia_server_host = ganglia_server_hosts[0] #users and groups -yarn_user = config['configurations']['global']['yarn_user'] -hbase_user = config['configurations']['global']['hbase_user'] -nagios_user = config['configurations']['global']['nagios_user'] -oozie_user = config['configurations']['global']['oozie_user'] -webhcat_user = config['configurations']['global']['hcat_user'] -hcat_user = config['configurations']['global']['hcat_user'] -hive_user = config['configurations']['global']['hive_user'] -smoke_user = config['configurations']['global']['smokeuser'] -mapred_user = config['configurations']['global']['mapred_user'] +hbase_user = config['configurations']['hbase-env']['hbase_user'] +nagios_user = config['configurations']['nagios-env']['nagios_user'] +oozie_user = config['configurations']['oozie-env']['oozie_user'] +webhcat_user = config['configurations']['hive-env']['hcat_user'] +hcat_user = config['configurations']['hive-env']['hcat_user'] +hive_user = config['configurations']['hive-env']['hive_user'] +smoke_user = config['configurations']['hadoop-env']['smokeuser'] +mapred_user = config['configurations']['mapred-env']['mapred_user'] hdfs_user = status_params.hdfs_user -user_group = config['configurations']['global']['user_group'] -proxyuser_group = config['configurations']['global']['proxyuser_group'] -nagios_group = config['configurations']['global']['nagios_group'] +user_group = config['configurations']['hadoop-env']['user_group'] +proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group'] +nagios_group = config['configurations']['nagios-env']['nagios_group'] smoke_user_group = "users" #hadoop params @@ -104,7 +103,7 @@ hadoop_conf_dir = "/etc/hadoop/conf" hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix hadoop_bin = "/usr/lib/hadoop/bin" -hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix'] +hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket" dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path) @@ -129,9 +128,9 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir'] #for create_hdfs_directory hostname = config["hostname"] hadoop_conf_dir = "/etc/hadoop/conf" -hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab'] -hdfs_user = config['configurations']['global']['hdfs_user'] -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) import functools #create partial functions with common arguments for every HdfsDirectory call #to create hdfs directory we need to 
call params.HdfsDirectory in code http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py index 4097373..0027a4c 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py @@ -21,8 +21,8 @@ from resource_management import * config = Script.get_config() -hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix'] -hdfs_user = config['configurations']['global']['hdfs_user'] +hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}") datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid") namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid") http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml deleted file mode 100644 index 90e7627..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml +++ /dev/null @@ -1,100 +0,0 @@ - - - - - - - hive_database_type - mysql - Default HIVE DB type. - - - hive_database - New MySQL Database - - Property that determines whether the HIVE DB is managed by Ambari. - - - - hive_ambari_database - MySQL - Database type. - - - hive_database_name - hive - Database name. - - - hive_dbroot - /usr/lib/hive/lib/ - Hive DB Directory. - - - hive_conf_dir - /etc/hive/conf - Hive Conf Dir. - - - hive_log_dir - /var/log/hive - Directory for Hive Log files. - - - hive_pid_dir - /var/run/hive - Hive PID Dir. - - - hive_aux_jars_path - /usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar - Hive auxiliary jar path. - - - hive_user - hive - Hive User. - - - - - - hcat_log_dir - /var/log/webhcat - WebHCat Log Dir. - - - hcat_pid_dir - /var/run/webhcat - WebHCat Pid Dir. - - - hcat_user - hcat - HCat User. - - - webhcat_user - hcat - WebHCat User. - - - http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml new file mode 100644 index 0000000..738818c --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml @@ -0,0 +1,139 @@ + + + + + + + hive_database_type + mysql + Default HIVE DB type. + + + hive_database + New MySQL Database + + Property that determines whether the HIVE DB is managed by Ambari. 
+ + + + hive_ambari_database + MySQL + Database type. + + + hive_database_name + hive + Database name. + + + hive_dbroot + /usr/lib/hive/lib/ + Hive DB Directory. + + + hive_conf_dir + /etc/hive/conf + Hive Conf Dir. + + + hive_log_dir + /var/log/hive + Directory for Hive Log files. + + + hive_pid_dir + /var/run/hive + Hive PID Dir. + + + hive_aux_jars_path + /usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar + Hive auxiliary jar path. + + + hive_user + hive + Hive User. + + + + + + hcat_log_dir + /var/log/webhcat + WebHCat Log Dir. + + + hcat_pid_dir + /var/run/webhcat + WebHCat Pid Dir. + + + hcat_user + hcat + HCat User. + + + webhcat_user + hcat + WebHCat User. + + + + + content + hive-env.sh content + + if [ "$SERVICE" = "cli" ]; then + if [ -z "$DEBUG" ]; then + export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit" + else + export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" + fi + fi + +# The heap size of the jvm stared by hive shell script can be controlled via: + +export HADOOP_HEAPSIZE="{{hive_heapsize}}" +export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" + +# Larger heap size may be required when running queries over large number of files or partitions. +# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be +# appropriate for hive server (hwi etc). + + +# Set HADOOP_HOME to point to a specific hadoop install directory +HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} + +# Hive Configuration Directory can be controlled by: +export HIVE_CONF_DIR={{conf_dir}} + +# Folder containing extra ibraries required for hive compilation/execution can be controlled by: +if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then + export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH} +else + export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}} +fi +export METASTORE_PORT={{hive_metastore_port}} + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml index 7c929b3..97bcc0a 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml @@ -128,7 +128,7 @@ hive-site - global + hive-env hive-log4j hive-exec-log4j @@ -165,8 +165,8 @@ - global hive-site + hive-env http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py index 0e44e77..5ee6000 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py @@ -94,7 +94,7 @@ def hive(name=None): File(format("{hive_config_dir}/hive-env.sh"), owner=params.hive_user, group=params.user_group, - content=Template('hive-env.sh.j2', 
conf_dir=hive_config_dir) + content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir) ) crt_file(format("{hive_conf_dir}/hive-default.xml.template")) http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py index 7453ed4..be2b77e 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py @@ -31,7 +31,7 @@ hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.opti hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword'] #users -hive_user = config['configurations']['global']['hive_user'] +hive_user = config['configurations']['hive-env']['hive_user'] hive_lib = '/usr/lib/hive/lib/' #JDBC driver jar name hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName'] @@ -56,26 +56,26 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0] hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000") hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}") -smokeuser = config['configurations']['global']['smokeuser'] +smokeuser = config['configurations']['hadoop-env']['smokeuser'] smoke_test_sql = "/tmp/hiveserver2.sql" smoke_test_path = "/tmp/hiveserver2Smoke.sh" -smoke_user_keytab = config['configurations']['global']['smokeuser_keytab'] +smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab'] _authentication = config['configurations']['core-site']['hadoop.security.authentication'] security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos') -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'] #hive_env hive_conf_dir = "/etc/hive/conf" -hive_dbroot = config['configurations']['global']['hive_dbroot'] -hive_log_dir = config['configurations']['global']['hive_log_dir'] +hive_dbroot = config['configurations']['hive-env']['hive_dbroot'] +hive_log_dir = config['configurations']['hive-env']['hive_log_dir'] hive_pid_dir = status_params.hive_pid_dir hive_pid = status_params.hive_pid #hive-site -hive_database_name = config['configurations']['global']['hive_database_name'] +hive_database_name = config['configurations']['hive-env']['hive_database_name'] #Starting hiveserver2 start_hiveserver2_script = 'startHiveserver2.sh' @@ -88,8 +88,8 @@ hive_metastore_pid = status_params.hive_metastore_pid java_share_dir = '/usr/share/java' driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}") -hdfs_user = config['configurations']['global']['hdfs_user'] -user_group = config['configurations']['global']['user_group'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] +user_group = config['configurations']['hadoop-env']['user_group'] artifact_dir = "/tmp/HDP-artifacts/" target = format("{hive_lib}/{jdbc_jar_name}") 
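Note on a change that recurs throughout this patch: every kinit_path_local assignment drops the default("kinit_path_local", None) entry and now searches only the fixed directories. A rough, self-contained sketch of what such a lookup amounts to -- find_kinit below is invented for illustration and is not Ambari's functions.get_kinit_path:

import os

def find_kinit(search_dirs):
    # illustrative stand-in only: return the first directory that holds a
    # kinit binary, falling back to the bare command name (resolved via PATH)
    for d in search_dirs:
        candidate = os.path.join(d, "kinit")
        if os.path.isfile(candidate):
            return candidate
    return "kinit"

kinit_path_local = find_kinit(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

The effect of the change is simply that None is no longer passed in as a candidate directory.
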
@@ -100,14 +100,15 @@ driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}") start_hiveserver2_path = "/tmp/start_hiveserver2_script" start_metastore_path = "/tmp/start_metastore_script" -hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path'] -hadoop_heapsize = config['configurations']['global']['hadoop_heapsize'] +hive_aux_jars_path = config['configurations']['hive-env']['hive_aux_jars_path'] +hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] hive_heapsize = config['configurations']['hive-site']['hive.heapsize'] java64_home = config['hostLevelParams']['java_home'] +hive_env_sh_template = config['configurations']['hive-env']['content'] ##### MYSQL -db_name = config['configurations']['global']['hive_database_name'] +db_name = config['configurations']['hive-env']['hive_database_name'] mysql_user = "mysql" mysql_group = 'mysql' mysql_host = config['clusterHostInfo']['hive_mysql_host'] @@ -122,11 +123,11 @@ hcat_lib = '/usr/lib/hcatalog/share/hcatalog' hcat_dbroot = hcat_lib -hcat_user = config['configurations']['global']['hcat_user'] -webhcat_user = config['configurations']['global']['webhcat_user'] +hcat_user = config['configurations']['hive-env']['hcat_user'] +webhcat_user = config['configurations']['hive-env']['webhcat_user'] hcat_pid_dir = status_params.hcat_pid_dir -hcat_log_dir = config['configurations']['global']['hcat_log_dir'] #hcat_log_dir +hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir'] #hcat_log_dir hadoop_conf_dir = '/etc/hadoop/conf' @@ -151,9 +152,9 @@ hive_hdfs_user_mode = 0700 #for create_hdfs_directory hostname = config["hostname"] hadoop_conf_dir = "/etc/hadoop/conf" -hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab'] -hdfs_user = config['configurations']['global']['hdfs_user'] -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) import functools #create partial functions with common arguments for every HdfsDirectory call #to create hdfs directory we need to call params.HdfsDirectory in code http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py index 8fed3d4..f371bee 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py @@ -22,12 +22,12 @@ from resource_management import * config = Script.get_config() -hive_pid_dir = config['configurations']['global']['hive_pid_dir'] +hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir'] hive_pid = 'hive-server.pid' hive_metastore_pid = 'hive.pid' -hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir +hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir if System.get_instance().os_family == "suse": daemon_name = 'mysql' 
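For readers following the params.py hunks: the pattern repeated across the services in this patch is that lookups against the 'global' configuration type are rewritten against the new per-service *-env types (hadoop-env, hive-env, and the other *-env types added later in the patch). A minimal sketch of the before/after lookup, using a plain dict in place of the structure returned by Script.get_config() -- the values shown are only examples:

# not Ambari code: a stripped-down stand-in for the command configuration
config = {
    'configurations': {
        # these keys lived under 'global' before this patch
        'hadoop-env': {'hdfs_user': 'hdfs', 'user_group': 'hadoop'},
        'hive-env':   {'hive_user': 'hive', 'hive_log_dir': '/var/log/hive'},
    }
}

# old lookup, removed by this patch:
#   hdfs_user = config['configurations']['global']['hdfs_user']
# new lookup, as in the params.py hunks above:
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hive_user = config['configurations']['hive-env']['hive_user']
print(hdfs_user, hive_user)  # -> hdfs hive

The metainfo.xml configuration-dependencies lists change accordingly, replacing global with the matching *-env type.
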
http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 deleted file mode 100644 index 5539dc3..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 +++ /dev/null @@ -1,78 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Set Hive and Hadoop environment variables here. These variables can be used -# to control the execution of Hive. It should be used by admins to configure -# the Hive installation (so that users do not have to set environment variables -# or set command line parameters to get correct behavior). -# -# The hive service being invoked (CLI/HWI etc.) is available via the environment -# variable SERVICE - -# Hive Client memory usage can be an issue if a large number of clients -# are running at the same time. The flags below have been useful in -# reducing memory usage: -# - if [ "$SERVICE" = "cli" ]; then - if [ -z "$DEBUG" ]; then - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit" - else - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" - fi - fi - -# The heap size of the jvm stared by hive shell script can be controlled via: - -export HADOOP_HEAPSIZE="{{hive_heapsize}}" -export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" - -# Larger heap size may be required when running queries over large number of files or partitions. -# By default hive shell scripts use a heap size of 256 (MB). 
Larger heap size would also be -# appropriate for hive server (hwi etc). - - -# Set HADOOP_HOME to point to a specific hadoop install directory -HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} - -# Hive Configuration Directory can be controlled by: -export HIVE_CONF_DIR={{conf_dir}} - -# Folder containing extra ibraries required for hive compilation/execution can be controlled by: -if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then - export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH} -else - export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}} -fi -export METASTORE_PORT={{hive_metastore_port}} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml deleted file mode 100644 index 41bb735..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml +++ /dev/null @@ -1,150 +0,0 @@ - - - - - - - mapred_local_dir - /hadoop/mapred - MapRed Local Directories. - - - mapred_system_dir - /mapred/system - MapRed System Directories. - - - scheduler_name - org.apache.hadoop.mapred.CapacityTaskScheduler - MapRed Capacity Scheduler. - - - jtnode_opt_newsize - 200 - Mem New Size. - - - jtnode_opt_maxnewsize - 200 - Max New size. - - - hadoop_heapsize - 1024 - Hadoop maximum Java heap size - - - jtnode_heapsize - 1024 - Maximum Java heap size for JobTracker in MB (Java option -Xmx) - - - mapred_map_tasks_max - 4 - Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker - - - mapred_red_tasks_max - 2 - Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker - - - mapred_cluster_map_mem_mb - -1 - The virtual memory size of a single Map slot in the MapReduce framework - - - mapred_cluster_red_mem_mb - -1 - The virtual memory size of a single Reduce slot in the MapReduce framework - - - mapred_job_map_mem_mb - -1 - Virtual memory for single Map task - - - mapred_child_java_opts_sz - 768 - Java options for the TaskTracker child processes. - - - io_sort_mb - 200 - The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration). - - - io_sort_spill_percent - 0.9 - Percentage of sort buffer used for record collection (Expert-only configuration. - - - mapreduce_userlog_retainhours - 24 - The maximum time, in hours, for which the user-logs are to be retained after the job completion. - - - maxtasks_per_job - -1 - Maximum number of tasks for a single Job - - - lzo_enabled - true - LZO compression enabled - - - snappy_enabled - true - LZO compression enabled - - - rca_enabled - true - Enable Job Diagnostics. - - - mapred_hosts_exclude - - Exclude entered hosts - - - mapred_hosts_include - - Include entered hosts - - - mapred_jobstatus_dir - /mapred/jobstatus - Job Status directory - - - task_controller - org.apache.hadoop.mapred.DefaultTaskController - Task Controller. - - - mapred_user - mapred - MapReduce User. 
- - - http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml new file mode 100644 index 0000000..00f3825 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml @@ -0,0 +1,150 @@ + + + + + + + mapred_local_dir + /hadoop/mapred + MapRed Local Directories. + + + mapred_system_dir + /mapred/system + MapRed System Directories. + + + scheduler_name + org.apache.hadoop.mapred.CapacityTaskScheduler + MapRed Capacity Scheduler. + + + jtnode_opt_newsize + 200 + Mem New Size. + + + jtnode_opt_maxnewsize + 200 + Max New size. + + + hadoop_heapsize + 1024 + Hadoop maximum Java heap size + + + jtnode_heapsize + 1024 + Maximum Java heap size for JobTracker in MB (Java option -Xmx) + + + mapred_map_tasks_max + 4 + Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker + + + mapred_red_tasks_max + 2 + Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker + + + mapred_cluster_map_mem_mb + -1 + The virtual memory size of a single Map slot in the MapReduce framework + + + mapred_cluster_red_mem_mb + -1 + The virtual memory size of a single Reduce slot in the MapReduce framework + + + mapred_job_map_mem_mb + -1 + Virtual memory for single Map task + + + mapred_child_java_opts_sz + 768 + Java options for the TaskTracker child processes. + + + io_sort_mb + 200 + The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration). + + + io_sort_spill_percent + 0.9 + Percentage of sort buffer used for record collection (Expert-only configuration. + + + mapreduce_userlog_retainhours + 24 + The maximum time, in hours, for which the user-logs are to be retained after the job completion. + + + maxtasks_per_job + -1 + Maximum number of tasks for a single Job + + + lzo_enabled + true + LZO compression enabled + + + snappy_enabled + true + LZO compression enabled + + + rca_enabled + true + Enable Job Diagnostics. + + + mapred_hosts_exclude + + Exclude entered hosts + + + mapred_hosts_include + + Include entered hosts + + + mapred_jobstatus_dir + /mapred/jobstatus + Job Status directory + + + task_controller + org.apache.hadoop.mapred.DefaultTaskController + Task Controller. + + + mapred_user + mapred + MapReduce User. 
+ + + http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml index 82de5dd..d8cb053 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml @@ -103,8 +103,8 @@ capacity-scheduler core-site - global mapred-site + mapred-env mapred-queue-acls mapreduce-log4j http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py index 432f803..a7e79d5 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py @@ -36,18 +36,18 @@ tasktracker_pid_file = status_params.tasktracker_pid_file hadoop_libexec_dir = '/usr/lib/hadoop/libexec' hadoop_bin = "/usr/lib/hadoop/bin" -user_group = config['configurations']['global']['user_group'] -hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix'] -mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix) +user_group = config['configurations']['hadoop-env']['user_group'] +hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] +mapred_log_dir_prefix = hdfs_log_dir_prefix mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir'] update_exclude_file_only = config['commandParams']['update_exclude_file_only'] hadoop_jar_location = "/usr/lib/hadoop/" -smokeuser = config['configurations']['global']['smokeuser'] +smokeuser = config['configurations']['hadoop-env']['smokeuser'] _authentication = config['configurations']['core-site']['hadoop.security.authentication'] security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos') -smoke_user_keytab = config['configurations']['global']['smokeuser_keytab'] -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab'] +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) #exclude file mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", []) @@ -60,9 +60,9 @@ mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapred. 
#for create_hdfs_directory hostname = config["hostname"] hadoop_conf_dir = "/etc/hadoop/conf" -hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix'] -hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab'] -hdfs_user = config['configurations']['global']['hdfs_user'] +hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] import functools #create partial functions with common arguments for every HdfsDirectory call #to create hdfs directory we need to call params.HdfsDirectory in code http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py index 99c4dcd..11986b0 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py @@ -23,8 +23,8 @@ from resource_management import * config = Script.get_config() -mapred_user = config['configurations']['global']['mapred_user'] -pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix'] +mapred_user = config['configurations']['mapred-env']['mapred_user'] +pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}") jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid") http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml deleted file mode 100644 index f36020d..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - nagios_user - nagios - Nagios Username. - - - nagios_group - nagios - Nagios Group. - - - nagios_web_login - nagiosadmin - Nagios web user. - - - nagios_web_password - - PASSWORD - Nagios Admin Password. - - - nagios_contact - - Hadoop Admin Email. - - - http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml new file mode 100644 index 0000000..54c742c --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml @@ -0,0 +1,51 @@ + + + + + + + nagios_user + nagios + Nagios Username. + + + nagios_group + nagios + Nagios Group. + + + nagios_web_login + nagiosadmin + Nagios web user. 
+ + + nagios_web_password + + PASSWORD + Nagios Admin Password. + + + nagios_contact + + Hadoop Admin Email. + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml index 6357787..79fecbc 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml @@ -115,7 +115,7 @@ - global + nagios-env true http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py index b172dce..282beb5 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py @@ -45,7 +45,7 @@ nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg") nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg") nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg") eventhandlers_dir = "/usr/lib/nagios/eventhandlers" -nagios_principal_name = default("nagios_principal_name", "nagios") +nagios_principal_name = default("/configurations/hadoop-env/nagios_principal_name", "nagios") hadoop_ssl_enabled = False namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.default.name']) @@ -74,7 +74,7 @@ mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir'] # this is different for HDP2 nn_metrics_property = "FSNamesystemMetrics" -clientPort = config['configurations']['global']['clientPort'] #ZK +clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK java64_home = config['hostLevelParams']['java_home'] @@ -82,8 +82,8 @@ check_cpu_on = is_jdk_greater_6(java64_home) _authentication = config['configurations']['core-site']['hadoop.security.authentication'] security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos') -nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab") -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +nagios_keytab_path = default("/configurations/hadoop-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab") +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) ganglia_port = "8651" ganglia_collector_slaves_port = "8660" @@ -105,12 +105,12 @@ else: htpasswd_cmd = "htpasswd" nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf") -nagios_user = config['configurations']['global']['nagios_user'] -nagios_group = config['configurations']['global']['nagios_group'] -nagios_web_login = config['configurations']['global']['nagios_web_login'] -nagios_web_password = config['configurations']['global']['nagios_web_password'] -user_group = config['configurations']['global']['user_group'] 
-nagios_contact = config['configurations']['global']['nagios_contact'] +nagios_user = config['configurations']['nagios-env']['nagios_user'] +nagios_group = config['configurations']['nagios-env']['nagios_group'] +nagios_web_login = config['configurations']['nagios-env']['nagios_web_login'] +nagios_web_password = config['configurations']['nagios-env']['nagios_web_password'] +user_group = config['configurations']['hadoop-env']['user_group'] +nagios_contact = config['configurations']['nagios-env']['nagios_contact'] namenode_host = default("/clusterHostInfo/namenode_host", None) _snamenode_host = default("/clusterHostInfo/snamenode_host", None) http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml deleted file mode 100644 index 1410bac..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - oozie_user - oozie - Oozie User. - - - oozie_database - New Derby Database - Oozie Server Database. - - - oozie_derby_database - Derby - Oozie Derby Database. - - - oozie_data_dir - /hadoop/oozie/data - Data directory in which the Oozie DB exists - - - oozie_log_dir - /var/log/oozie - Directory for oozie logs - - - oozie_pid_dir - /var/run/oozie - Directory in which the pid files for oozie reside. - - - oozie_admin_port - 11001 - The admin port Oozie server runs. - - - http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml new file mode 100644 index 0000000..038f528 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml @@ -0,0 +1,120 @@ + + + + + + + oozie_user + oozie + Oozie User. + + + oozie_database + New Derby Database + Oozie Server Database. + + + oozie_derby_database + Derby + Oozie Derby Database. + + + oozie_data_dir + /hadoop/oozie/data + Data directory in which the Oozie DB exists + + + oozie_log_dir + /var/log/oozie + Directory for oozie logs + + + oozie_pid_dir + /var/run/oozie + Directory in which the pid files for oozie reside. + + + oozie_admin_port + 11001 + The admin port Oozie server runs. + + + + + content + oozie-env.sh content + +#!/bin/bash + +#Set JAVA HOME +export JAVA_HOME={{java_home}} +export JRE_HOME={{java_home}} + +# Set Oozie specific environment variables here. 
+ +# Settings for the Embedded Tomcat that runs Oozie +# Java System properties for Oozie should be specified in this variable +# +# export CATALINA_OPTS= + +# Oozie configuration file to load from Oozie configuration directory +# +# export OOZIE_CONFIG_FILE=oozie-site.xml + +# Oozie logs directory +# +export OOZIE_LOG={{oozie_log_dir}} + +# Oozie pid directory +# +export CATALINA_PID={{pid_file}} + +#Location of the data for oozie +export OOZIE_DATA={{oozie_data_dir}} + +# Oozie Log4J configuration file to load from Oozie configuration directory +# +# export OOZIE_LOG4J_FILE=oozie-log4j.properties + +# Reload interval of the Log4J configuration file, in seconds +# +# export OOZIE_LOG4J_RELOAD=10 + +# The port Oozie server runs +# +export OOZIE_HTTP_PORT={{oozie_server_port}} + +# The admin port Oozie server runs +# +export OOZIE_ADMIN_PORT={{oozie_server_admin_port}} + +# The host name Oozie server runs on +# +# export OOZIE_HTTP_HOSTNAME=`hostname -f` + +# The base URL for callback URLs to Oozie +# +# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie" +export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64 + + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml index 69a127a..a2c984e 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml @@ -102,8 +102,8 @@ - global oozie-site + oozie-env oozie-log4j http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py index a073064..5fd2c96 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py @@ -44,9 +44,10 @@ def oozie(is_server=False group = params.user_group ) - TemplateConfig( format("{conf_dir}/oozie-env.sh"), - owner = params.oozie_user - ) + File(format("{conf_dir}/oozie-env.sh"), + owner=params.oozie_user, + content=InlineTemplate(params.oozie_env_sh_template) + ) if (params.log4j_props != None): File(format("{params.conf_dir}/oozie-log4j.properties"), http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py index ae4da20..7072ab2 100644 --- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py @@ -25,11 +25,11 @@ import status_params config = Script.get_config() 
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] -oozie_user = config['configurations']['global']['oozie_user'] -smokeuser = config['configurations']['global']['smokeuser'] +oozie_user = config['configurations']['oozie-env']['oozie_user'] +smokeuser = config['configurations']['hadoop-env']['smokeuser'] conf_dir = "/etc/oozie/conf" hadoop_conf_dir = "/etc/hadoop/conf" -user_group = config['configurations']['global']['user_group'] +user_group = config['configurations']['hadoop-env']['user_group'] jdk_location = config['hostLevelParams']['jdk_location'] check_db_connection_jar_name = "DBConnectionVerification.jar" check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}") @@ -42,15 +42,15 @@ hadoop_jar_location = "/usr/lib/hadoop/" # for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip" ext_js_path = "/usr/share/HDP-oozie/ext.zip" oozie_libext_dir = "/usr/lib/oozie/libext" -lzo_enabled = config['configurations']['global']['lzo_enabled'] +lzo_enabled = config['configurations']['mapred-env']['lzo_enabled'] _authentication = config['configurations']['core-site']['hadoop.security.authentication'] security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos') -kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'] oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'] -smokeuser_keytab = config['configurations']['global']['smokeuser_keytab'] -oozie_keytab = config['configurations']['global']['oozie_keytab'] +smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab'] +oozie_keytab = config['configurations']['hadoop-env']['oozie_keytab'] oracle_driver_jar_name = "ojdbc6.jar" java_share_dir = "/usr/share/java" @@ -59,14 +59,15 @@ java_home = config['hostLevelParams']['java_home'] oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username'] oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","") oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "") -oozie_log_dir = config['configurations']['global']['oozie_log_dir'] -oozie_data_dir = config['configurations']['global']['oozie_data_dir'] +oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir'] +oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir'] oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url']) -oozie_server_admin_port = config['configurations']['global']['oozie_admin_port'] +oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port'] oozie_lib_dir = "/var/lib/oozie/" oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/" jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "") +oozie_env_sh_template = config['configurations']['oozie-env']['content'] if jdbc_driver_name == "com.mysql.jdbc.Driver": jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar" @@ -101,9 +102,9 @@ oozie_hdfs_user_mode = 0775 #for create_hdfs_directory hostname = config["hostname"] hadoop_conf_dir = "/etc/hadoop/conf" -hdfs_user_keytab = 
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
index c44fcf4..a665449 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
@@ -22,5 +22,5 @@ from resource_management import *
 config = Script.get_config()
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
 pid_file = format("{oozie_pid_dir}/oozie.pid")
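The params.py and status_params.py hunks above (and the matching PIG hunks below) all follow one pattern: lookups that used the catch-all global section now read from a purpose-specific *-env configuration. A condensed sketch of the resulting lookups, using only key names that appear in this patch and assuming the agent's resource_management library just as the scripts themselves do:

    from resource_management import *

    config = Script.get_config()

    # Oozie-specific settings now come from oozie-env
    oozie_user            = config['configurations']['oozie-env']['oozie_user']
    oozie_log_dir         = config['configurations']['oozie-env']['oozie_log_dir']
    oozie_data_dir        = config['configurations']['oozie-env']['oozie_data_dir']
    oozie_pid_dir         = config['configurations']['oozie-env']['oozie_pid_dir']
    oozie_env_sh_template = config['configurations']['oozie-env']['content']  # body of oozie-env.sh

    # cluster-wide users, groups and keytabs now come from hadoop-env
    hdfs_user  = config['configurations']['hadoop-env']['hdfs_user']
    smokeuser  = config['configurations']['hadoop-env']['smokeuser']
    user_group = config['configurations']['hadoop-env']['user_group']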
http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 502ea61..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,88 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-export JRE_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
new file mode 100644
index 0000000..d0de1ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+
+    content
+    pig-env.sh content
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
index fb09b2f..7f2ec7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
@@ -52,7 +52,7 @@
-        global
+        pig-env
         pig-log4j
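The new pig-env.xml above ships the whole pig-env.sh body in its content property, with Jinja-style {{...}} placeholders. A quick stand-alone way to preview how such a block renders is plain Jinja2 (the agent's InlineTemplate is Jinja2-based); the values below are illustrative assumptions, not read from any cluster:

    from jinja2 import Template

    pig_env_content = (
        "JAVA_HOME={{java64_home}}\n"
        "HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n"
    )

    # Prints the rendered script, e.g. JAVA_HOME=/usr/jdk64/jdk1.6.0_31
    print(Template(pig_env_content).render(
        java64_home="/usr/jdk64/jdk1.6.0_31",
        hadoop_home="/usr/lib/hadoop",
    ))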
http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
index c44acfa..b8cb82a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
@@ -26,13 +26,14 @@ config = Script.get_config()
 pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+user_group = config['configurations']['hadoop-env']['user_group']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+pig_env_sh_template = config['configurations']['pig-env']['content']
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
index 8a8cd52..e73a0d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
@@ -30,8 +30,13 @@ def pig():
             group = params.user_group
   )
-  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
-
+  File(format("{pig_conf_dir}/pig-env.sh"),
+    owner=params.hdfs_user,
+    content=InlineTemplate(params.pig_env_sh_template)
+  )
+
+  pig_TemplateConfig( ['pig.properties'])
+
   if (params.log4j_props != None):
     File(format("{params.pig_conf_dir}/log4j.properties"),
       mode=0644,
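Since both services now write their env scripts from configuration, a small sanity check one could run on a node after the scripts above have executed (not part of the patch; the paths are the conf_dir and pig_conf_dir constants from the params.py hunks):

    import os

    for path in ("/etc/oozie/conf/oozie-env.sh", "/etc/pig/conf/pig-env.sh"):
        # The file should exist and be non-empty once the File/InlineTemplate
        # resources have rendered the *-env content from configuration.
        status = "OK" if os.path.isfile(path) and os.path.getsize(path) > 0 else "MISSING"
        print("%s: %s" % (path, status))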