From: dmitriusan@apache.org
To: commits@ambari.apache.org
Date: Mon, 23 Dec 2013 21:18:43 -0000
Message-Id: <60743fc64baf4c7cbf8f1f8f92cd35f4@git.apache.org>
Subject: [15/29] AMBARI-4159. Enable 2.0.8 stack (with python services support) (dlysnichenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..63dc6cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
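
For illustration only, a minimal sketch (not part of the commit) of how the
server-list loop above expands. It assumes the jinja2 package and uses two
made-up host names:

  from jinja2 import Template

  # The zoo.cfg.j2 fragment that emits one quorum entry per host;
  # loop.index is 1-based, so the hosts become server.1, server.2, ...
  fragment = ("{% for host in zookeeper_hosts %}"
              "server.{{loop.index}}={{host}}:2888:3888\n"
              "{% endfor %}")
  print(Template(fragment).render(
      zookeeper_hosts=["zk1.example.com", "zk2.example.com"]))
  # server.1=zk1.example.com:2888:3888
  # server.2=zk2.example.com:2888:3888
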
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
new file mode 100644
index 0000000..493a2a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..aa123e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,8 @@
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};
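
For illustration only, a minimal sketch (not part of the commit) of what the
Server section above renders to; the keytab path and principal below are
hypothetical stand-ins for values Ambari takes from cluster configuration:

  from jinja2 import Template

  jaas_j2 = ('Server {\n'
             'com.sun.security.auth.module.Krb5LoginModule required\n'
             'useKeyTab=true\n'
             'storeKey=true\n'
             'useTicketCache=false\n'
             'keyTab="{{zk_keytab_path}}"\n'
             'principal="{{zk_principal}}";\n'
             '};')
  # Both substitutions are invented for the example.
  print(Template(jaas_j2).render(
      zk_keytab_path="/etc/security/keytabs/zk.service.keytab",
      zk_principal="zookeeper/zk1.example.com@EXAMPLE.COM"))
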
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
-  for ((i=1001; i<=2000; i++))
-  do
-    grep -q $i /etc/passwd
-    if [ "$?" -ne 0 ]
-    then
-      newUid=$i
-      break
-    fi
-  done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0
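
The deleted script above claims the first unused uid in 1001..2000 by
grepping /etc/passwd. A short Python sketch of the same idea (illustrative
only, not part of the commit); the pwd module avoids the substring matches
that "grep -q $i /etc/passwd" can produce:

  import pwd

  def find_available_uid(start=1001, stop=2000):
      # Collect every uid already present in the password database,
      # then return the first free one in [start, stop], or None.
      used = {entry.pw_uid for entry in pwd.getpwall()}
      for uid in range(start, stop + 1):
          if uid not in used:
              return uid
      return None
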
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
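
As a point of comparison only, a rough Python-logging analogue (not from the
commit) of the DailyRollingFileAppender block in the file above; the file
name and format string are illustrative:

  import logging
  from logging.handlers import TimedRotatingFileHandler

  # Roll the log at midnight, like DRFA's .yyyy-MM-dd date pattern,
  # and mimic the "Date LogLevel LoggerName LogMessage" layout.
  handler = TimedRotatingFileHandler("hadoop.log", when="midnight")
  handler.setFormatter(
      logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))
  logging.getLogger().addHandler(handler)
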
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/hook.py
deleted file mode 100644
index 61e3720..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,38 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_java()
-    setup_users()
-    setup_hadoop()
-    setup_configs()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/params.py
deleted file mode 100644
index 75203ea..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -""" - -from resource_management import * -from resource_management.core.system import System -import os - -config = Script.get_config() - -#java params -java_home = "/usr/jdk64/jdk1.6.0_31" -artifact_dir = "/tmp/HDP-artifacts/" -jdk_bin = "jdk-6u31-linux-x64.bin" -jce_policy_zip = "jce_policy-6.zip" -jce_location = config['hostLevelParams']['jdk_location'] -jdk_location = config['hostLevelParams']['jdk_location'] -#security params -security_enabled = config['configurations']['global']['security_enabled'] -dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file'] -dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file'] -dfs_secondary_namenode_keytab_file = config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file'] -dfs_datanode_keytab_file = config['configurations']['hdfs-site']['dfs.datanode.keytab.file'] -dfs_namenode_keytab_file = config['configurations']['hdfs-site']['dfs.namenode.keytab.file'] - -dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal'] -dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal'] -dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal'] -dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal'] -dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal'] -dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal'] -dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal'] - -#users and groups -yarn_user = config['configurations']['global']['yarn_user'] -hbase_user = config['configurations']['global']['hbase_user'] -nagios_user = config['configurations']['global']['nagios_user'] -oozie_user = config['configurations']['global']['oozie_user'] -webhcat_user = config['configurations']['global']['hcat_user'] -hcat_user = config['configurations']['global']['hcat_user'] -hive_user = config['configurations']['global']['hive_user'] -smoke_user = config['configurations']['global']['smokeuser'] -mapred_user = config['configurations']['global']['mapred_user'] -hdfs_user = config['configurations']['global']['hdfs_user'] -zk_user = config['configurations']['global']['zk_user'] -gmetad_user = config['configurations']['global']["gmetad_user"] -gmond_user = config['configurations']['global']["gmond_user"] - -user_group = config['configurations']['global']['user_group'] -proxyuser_group = config['configurations']['global']['proxyuser_group'] -nagios_group = config['configurations']['global']['nagios_group'] -smoke_user_group = "users" -mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group) - -#snmp -snmp_conf_dir = "/etc/snmp/" -snmp_source = "0.0.0.0/0" -snmp_community = "hadoop" - -#hosts -hostname = config["hostname"] -rm_host = default("/clusterHostInfo/rm_host", []) -slave_hosts = default("/clusterHostInfo/slave_hosts", []) -hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", []) -hive_server_host = 
default("/clusterHostInfo/hive_server_host", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -hs_host = default("/clusterHostInfo/hs_host", []) -jtnode_host = default("/clusterHostInfo/jtnode_host", []) -namenode_host = default("/clusterHostInfo/namenode_host", []) -zk_hosts = default("/clusterHostInfo/zookeeper_hosts", []) -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", []) - -has_resourcemanager = not len(rm_host) == 0 -has_slaves = not len(slave_hosts) == 0 -has_nagios = not len(hagios_server_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_hcat_server_host = not len(hcat_server_hosts) == 0 -has_hive_server_host = not len(hive_server_host) == 0 -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_zk_host = not len(zk_hosts) == 0 -has_ganglia_server = not len(ganglia_server_hosts) == 0 - -is_namenode_master = hostname in namenode_host -is_jtnode_master = hostname in jtnode_host -is_rmnode_master = hostname in rm_host -is_hsnode_master = hostname in hs_host -is_hbase_master = hostname in hbase_master_hosts -is_slave = hostname in slave_hosts -if has_ganglia_server: - ganglia_server_host = ganglia_server_hosts[0] -#hadoop params -hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}") -hadoop_lib_home = "/usr/lib/hadoop/lib" -hadoop_conf_dir = "/etc/hadoop/conf" -hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix'] -hadoop_home = "/usr" -hadoop_bin = "/usr/lib/hadoop/sbin" - -task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties") -limits_conf_dir = "/etc/security/limits.d" - -hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix'] -hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir'] -#db params -server_db_name = config['hostLevelParams']['db_name'] -db_driver_filename = config['hostLevelParams']['db_driver_filename'] -oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url'] -mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url'] - -ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0] -ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0] -ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0] -ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0] - -rca_enabled = config['configurations']['global']['rca_enabled'] -rca_disabled_prefix = "###" -if rca_enabled == True: - rca_prefix = "" -else: - rca_prefix = rca_disabled_prefix - -#hadoop-env.sh -java_home = config['configurations']['global']['java64_home'] -if System.get_instance().platform == "suse": - jsvc_path = "/usr/lib/bigtop-utils" -else: - jsvc_path = "/usr/libexec/bigtop-utils" - -hadoop_heapsize = config['configurations']['global']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['global']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['global']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['global']['namenode_opt_maxnewsize'] - -jtnode_opt_newsize = default("jtnode_opt_newsize","200m") -jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m") -jtnode_heapsize = default("jtnode_heapsize","1024m") -ttnode_heapsize = "1024m" - -dtnode_heapsize = config['configurations']['global']['dtnode_heapsize'] -mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*" -hadoop_libexec_dir = "/usr/lib/hadoop/libexec" 
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce") - -#taskcontroller.cfg - -mapred_local_dir = "/tmp/hadoop-mapred/mapred/local" - -#log4j.properties - -yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn") - -#exclude file -exlude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude'] -if 'hdfs-exclude-file' in config['configurations']: - if 'datanodes' in config['configurations']['hdfs-exclude-file']: - hdfs_exclude_file = config['configurations']['hdfs-exclude-file']['datanodes'].split(",") - else: - hdfs_exclude_file = [] -else: - hdfs_exclude_file = [] - -#hdfs ha properties -dfs_ha_enabled = False -dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None) -dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None) -if dfs_ha_namenode_ids: - dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(",")) - if dfs_ha_namenode_ids_array_len > 1: - dfs_ha_enabled = True - -if dfs_ha_enabled: - for nn_id in dfs_ha_namenode_ids: - nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')] - if hostname in nn_host: - namenode_id = nn_id - namenode_id = None - -dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None) \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/shared_initialization.py deleted file mode 100644 index cb064f3..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/scripts/shared_initialization.py +++ /dev/null @@ -1,408 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -import os - -from resource_management import * - -def setup_java(): - """ - Installs jdk using specific params, that comes from ambari-server - """ - import params - - jdk_curl_target = format("{artifact_dir}/{jdk_bin}") - java_dir = os.path.dirname(params.java_home) - java_exec = format("{java_home}/bin/java") - Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_bin} -o {jdk_curl_target}"), - path = ["/bin","/usr/bin/"], - not_if = format("test -e {java_exec}")) - Execute(format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1"), - path = ["/bin","/usr/bin/"], - not_if = format("test -e {java_exec}") - ) - jce_curl_target = format("{artifact_dir}/{jce_policy_zip}") - download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}") - Execute( download_jce, - path = ["/bin","/usr/bin/"], - not_if =format("test -e {jce_curl_target}"), - ignore_failures = True - ) - security_dir = format("{java_home}/jre/lib/security") - extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}") - Execute(extract_cmd, - only_if = format("test -e {security_dir} && test -f {jce_curl_target}"), - cwd = security_dir, - path = ['/bin/','/usr/bin'] - ) - -def setup_users(): - """ - Creates users before cluster installation - """ - import params - - Group(params.user_group) - Group(params.smoke_user_group) - Group(params.proxyuser_group) - User(params.smoke_user, - gid=params.user_group, - groups=[params.proxyuser_group] - ) - smoke_user_dirs = format( - "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}") - set_uid(params.smoke_user, smoke_user_dirs) - - if params.has_hbase_masters: - User(params.hbase_user, - gid = params.user_group, - groups=[params.user_group]) - hbase_user_dirs = format( - "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}") - set_uid(params.hbase_user, hbase_user_dirs) - - if params.has_nagios: - Group(params.nagios_group) - User(params.nagios_user, - gid=params.nagios_group) - - if params.has_oozie_server: - User(params.oozie_user, - gid = params.user_group) - - if params.has_hcat_server_host: - User(params.webhcat_user, - gid = params.user_group) - User(params.hcat_user, - gid = params.user_group) - - if params.has_hive_server_host: - User(params.hive_user, - gid = params.user_group) - - if params.has_resourcemanager: - User(params.yarn_user, - gid = params.user_group) - - if params.has_ganglia_server: - Group(params.gmetad_user) - Group(params.gmond_user) - User(params.gmond_user, - gid=params.user_group, - groups=[params.gmond_user]) - User(params.gmetad_user, - gid=params.user_group, - groups=[params.gmetad_user]) - - User(params.hdfs_user, - gid=params.user_group, - groups=[params.user_group] - ) - User(params.mapred_user, - gid=params.user_group, - groups=[params.user_group] - ) - if params.has_zk_host: - User(params.zk_user, - gid=params.user_group) - - -def setup_hadoop(): - """ - Setup hadoop files and directories - """ - import params - - File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'), - content=Template("snmpd.conf.j2")) - Service("snmpd", - action = "restart") - - Execute("/bin/echo 0 > /selinux/enforce", - only_if="test -f /selinux/enforce" - ) - - install_snappy() - - #directories - Directory(params.hadoop_conf_dir, - recursive=True, - 
owner='root', - group='root' - ) - Directory(params.hdfs_log_dir_prefix, - recursive=True, - owner='root', - group='root' - ) - Directory(params.hadoop_pid_dir_prefix, - recursive=True, - owner='root', - group='root' - ) - #this doesn't needed with stack 1 - Directory(os.path.dirname(params.hadoop_tmp_dir), - recursive=True, - owner=params.hdfs_user, - ) - #files - File(os.path.join(params.limits_conf_dir, 'hdfs.conf'), - owner='root', - group='root', - mode=0644, - content=Template("hdfs.conf.j2") - ) - if params.security_enabled: - File(os.path.join(params.hadoop_bin, "task-controller"), - owner="root", - group=params.mapred_tt_group, - mode=6050 - ) - tc_mode = 0644 - tc_owner = "root" - else: - tc_mode = None - tc_owner = params.hdfs_user - - if tc_mode: - File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'), - owner=tc_owner, - mode=tc_mode, - content=Template("taskcontroller.cfg.j2") - ) - else: - File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'), - owner=tc_owner, - content=Template("taskcontroller.cfg.j2") - ) - for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']: - File(os.path.join(params.hadoop_conf_dir, file), - owner=tc_owner, - content=Template(file + ".j2") - ) - - health_check_template = "health_check-v2" #for stack 1 use 'health_check' - File(os.path.join(params.hadoop_conf_dir, "health_check"), - owner=tc_owner, - content=Template(health_check_template + ".j2") - ) - - File(os.path.join(params.hadoop_conf_dir, "log4j.properties"), - owner=params.hdfs_user, - content=Template("log4j.properties.j2") - ) - - update_log4j_props(os.path.join(params.hadoop_conf_dir, "log4j.properties")) - - File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), - owner=params.hdfs_user, - content=Template("hadoop-metrics2.properties.j2") - ) - - db_driver_dload_cmd = "" - if params.server_db_name == 'oracle' and params.oracle_driver_url != "": - db_driver_dload_cmd = format( - "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}") - elif params.server_db_name == 'mysql' and params.mysql_driver_url != "": - db_driver_dload_cmd = format( - "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}") - - if db_driver_dload_cmd: - Execute(db_driver_dload_cmd, - not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}") - ) - - -def setup_configs(): - """ - Creates configs for services DHFS mapred - """ - import params - - if "mapred-queue-acls" in params.config['configurations']: - XmlConfig("mapred-queue-acls.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations'][ - 'mapred-queue-acls'], - owner=params.mapred_user, - group=params.user_group - ) - elif os.path.exists( - os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")): - File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"), - owner=params.mapred_user, - group=params.user_group - ) - - if "hadoop-policy" in params.config['configurations']: - XmlConfig("hadoop-policy.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations']['hadoop-policy'], - owner=params.hdfs_user, - group=params.user_group - ) - - XmlConfig("core-site.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations']['core-site'], - owner=params.hdfs_user, - group=params.user_group - ) - - if "mapred-site" in params.config['configurations']: - XmlConfig("mapred-site.xml", - conf_dir=params.hadoop_conf_dir, - 
configurations=params.config['configurations']['mapred-site'], - owner=params.mapred_user, - group=params.user_group - ) - - File(params.task_log4j_properties_location, - content=StaticFile("task-log4j.properties"), - mode=0755 - ) - - if "capacity-scheduler" in params.config['configurations']: - XmlConfig("capacity-scheduler.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations'][ - 'capacity-scheduler'], - owner=params.hdfs_user, - group=params.user_group - ) - - XmlConfig("hdfs-site.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations']['hdfs-site'], - owner=params.hdfs_user, - group=params.user_group - ) - - # if params.stack_version[0] == "1": - # Link('/usr/lib/hadoop/hadoop-tools.jar', - # to = '/usr/lib/hadoop/lib/hadoop-tools.jar', - # mode = 0755 - # ) - - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): - File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), - owner=params.hdfs_user, - group=params.user_group - ) - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')): - File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'), - owner=params.mapred_user, - group=params.user_group - ) - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): - Directory(os.path.join(params.hadoop_conf_dir, 'masters'), - owner=params.hdfs_user, - group=params.user_group - ) - if os.path.exists( - os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')): - File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'), - owner=params.mapred_user, - group=params.user_group - ) - if os.path.exists( - os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')): - File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'), - owner=params.mapred_user, - group=params.user_group - ) - - generate_exlude_file() - generate_include_file() - - -def set_uid(user, user_dirs): - """ - user_dirs - comma separated directories - """ - File("/tmp/changeUid.sh", - content=StaticFile("changeToSecureUid.sh"), - mode=0555) - Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"), - not_if = format("test $(id -u {user}) -gt 1000")) - - -def update_log4j_props(file): - import params - - property_map = { - 'ambari.jobhistory.database': params.ambari_db_rca_url, - 'ambari.jobhistory.driver': params.ambari_db_rca_driver, - 'ambari.jobhistory.user': params.ambari_db_rca_username, - 'ambari.jobhistory.password': params.ambari_db_rca_password, - 'ambari.jobhistory.logger': 'DEBUG,JHA', - - 'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender', - 'log4j.appender.JHA.database': '${ambari.jobhistory.database}', - 'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}', - 'log4j.appender.JHA.user': '${ambari.jobhistory.user}', - 'log4j.appender.JHA.password': '${ambari.jobhistory.password}', - - 'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}', - 'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true' - } - for key in property_map: - value = property_map[key] - Execute(format( - "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}")) - - -def generate_exlude_file(): - import params - - File(params.exlude_file_path, - content=Template("exclude_hosts_list.j2"), - owner=params.hdfs_user, - group=params.user_group - ) - - -def generate_include_file(): - import params - - if params.dfs_hosts and 
params.has_slaves: - include_hosts_list = params.slave_hosts - File(params.dfs_hosts, - content=Template("include_hosts_list.j2"), - owner=params.hdfs_user, - group=params.user_group - ) - - -def install_snappy(): - import params - - snappy_so = "libsnappy.so" - so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32") - so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64") - so_target_x86 = format("{so_target_dir_x86}/{snappy_so}") - so_target_x64 = format("{so_target_dir_x64}/{snappy_so}") - so_src_dir_x86 = format("{hadoop_home}/lib") - so_src_dir_x64 = format("{hadoop_home}/lib64") - so_src_x86 = format("{so_src_dir_x86}/{snappy_so}") - so_src_x64 = format("{so_src_dir_x64}/{snappy_so}") - Execute( - format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}")) - Execute( - format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}")) http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/commons-logging.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/commons-logging.properties.j2 deleted file mode 100644 index 77e458f..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/commons-logging.properties.j2 +++ /dev/null @@ -1,25 +0,0 @@ -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -#Logging Implementation - -#Log4J -org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger - -#JDK Logger -#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/exclude_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/exclude_hosts_list.j2 deleted file mode 100644 index bb5795b..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/exclude_hosts_list.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% for host in hdfs_exclude_file %} -{{host}} -{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-env.sh.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-env.sh.j2 deleted file mode 100644 index 7d10cc3..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-env.sh.j2 +++ /dev/null @@ -1,121 +0,0 @@ -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -# Set Hadoop-specific environment variables here. - -# The only required environment variable is JAVA_HOME. All others are -# optional. When running a distributed configuration it is best to -# set JAVA_HOME in this file, so that it is correctly defined on -# remote nodes. - -# The java implementation to use. Required. -export JAVA_HOME={{java_home}} -export HADOOP_HOME_WARN_SUPPRESS=1 - -# Hadoop Configuration Directory -#TODO: if env var set that can cause problems -export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}} - -{# this is different for HDP1 #} -# Path to jsvc required by secure HDP 2.0 datanode -export JSVC_HOME={{jsvc_path}} - - -# The maximum amount of heap to use, in MB. Default is 1000. -export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" - -export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}" - -# Extra Java runtime options. Empty by default. 
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}" - -# Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}" -HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}" - -HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}" -HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}" -HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}" - -export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" - -# The following applies to multiple commands (fs, dfs, fsck, distcp etc) -export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" -# On secure datanodes, user to run the datanode as after dropping privileges -export HADOOP_SECURE_DN_USER={{hdfs_user}} - -# Extra ssh options. Empty by default. -export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR" - -# Where log files are stored. $HADOOP_HOME/logs by default. -export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER - -# History server logs -export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER - -# Where log files are stored in the secure data environment. -export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER - -# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default. -# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves - -# host:path where hadoop code should be rsync'd from. Unset by default. -# export HADOOP_MASTER=master:/home/$USER/src/hadoop - -# Seconds to sleep between slave commands. Unset by default. This -# can be useful in large clusters, where, e.g., slave rsyncs can -# otherwise arrive faster than the master can service them. -# export HADOOP_SLAVE_SLEEP=0.1 - -# The directory where pid files are stored. /tmp by default. 
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER -export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER - -# History server pid -export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER - -YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY" - -# A string representing this instance of hadoop. $USER by default. -export HADOOP_IDENT_STRING=$USER - -# The scheduling priority for daemon processes. See 'man nice'. - -# export HADOOP_NICENESS=10 - -# Use libraries from standard classpath -JAVA_JDBC_LIBS="" -#Add libraries required by mysql connector -for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null` -do - JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile -done -#Add libraries required by oracle connector -for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null` -do - JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile -done -#Add libraries required by nodemanager -MAPREDUCE_LIBS={{mapreduce_libs_path}} -export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} - -# Setting path to hdfs command line -export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} - -#Mostly required for hadoop 2.0 -export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64 http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-metrics2.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-metrics2.properties.j2 deleted file mode 100644 index 990f42d..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hadoop-metrics2.properties.j2 +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# syntax: [prefix].[source|sink|jmx].[instance].[options] -# See package.html for org.apache.hadoop.metrics2 for details - -{% if has_ganglia_server %} -*.period=60 - -*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 -*.sink.ganglia.period=10 - -# default for supportsparse is false -*.sink.ganglia.supportsparse=true - -.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both -.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 - -# Hook up to the server -namenode.sink.ganglia.servers={{ganglia_server_host}}:8661 -datanode.sink.ganglia.servers={{ganglia_server_host}}:8660 -jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662 -tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8660 -maptask.sink.ganglia.servers={{ganglia_server_host}}:8660 -reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660 -resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664 -nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8660 -historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666 -journalnode.sink.ganglia.servers={{ganglia_server_host}}:8660 - -resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue - -{% endif %} http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hdfs.conf.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hdfs.conf.j2 deleted file mode 100644 index ca7baa2..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/hdfs.conf.j2 +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -{{hdfs_user}} - nofile 32768 -{{hdfs_user}} - nproc 65536 http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check-v2.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check-v2.j2 deleted file mode 100644 index cb7b12b..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check-v2.j2 +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -# -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. 
The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -err=0; - -function check_disks { - - for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do - fsdev="" - fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; - if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then - msg_="$msg_ $m(u)" - else - msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" - fi - done - - if [ -z "$msg_" ] ; then - echo "disks ok" ; exit 0 - else - echo "$msg_" ; exit 2 - fi - -} - -function check_link { - snmp=/usr/bin/snmpwalk - if [ -e $snmp ] ; then - $snmp -t 5 -Oe -Oq -Os -v 1 -c public localhost if | \ - awk ' { - split($1,a,".") ; - if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 } - if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 } - if ( a[1] == "ifType" ) { ifType[a[2]] = $2 } - if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 } - if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 } - if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 } - } - END { - up=0; - for (i in ifIndex ) { - if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) { - up=i; - } - } - if ( up == 0 ) { print "check link" ; exit 2 } - else { print ifDescr[up],"ok" } - }' - exit $? ; - fi -} - -# Run all checks -# Disabled 'check_link' for now... -for check in disks ; do - msg=`check_${check}` ; - if [ $? -eq 0 ] ; then - ok_msg="$ok_msg$msg," - else - err_msg="$err_msg$msg," - fi -done - -if [ ! -z "$err_msg" ] ; then - echo -n "ERROR $err_msg " -fi -if [ ! -z "$ok_msg" ] ; then - echo -n "OK: $ok_msg" -fi - -echo - -# Success! -exit 0 http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2 deleted file mode 100644 index b84b336..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2 +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -# -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index b84b336..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe -Oq -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-        if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-          up=i;
-        }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now...
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
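Two details worth noting in health_check.j2: the security flag and TaskTracker port are still referenced through Puppet-era <%=scope.function_hdp_template_var(...)%> markers rather than Jinja {{...}} syntax, and check_jetty decides health by polling the TaskTracker's /jmx servlet for shuffle_exceptions_caught. A rough Python rendition of that probe (hostname and port are placeholders; the real script shells out to curl and awk):

import json
import urllib.request

def check_jetty(host='localhost', port=50060):
    url = ('http://%s:%d/jmx?qry=Hadoop:service=TaskTracker,'
           'name=ShuffleServerMetrics' % (host, port))
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            beans = json.load(resp).get('beans', [])
    except Exception:
        return 'check jetty: ping failed'
    # Treat a missing metric as zero, like the e=${e:-0} fallback above.
    caught = beans[0].get('shuffle_exceptions_caught', 0) if beans else 0
    if caught > 10:
        return 'check jetty: shuffle_exceptions=%d' % caught
    return 'jetty ok'
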
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
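include_hosts_list.j2 (and the identical slaves.j2 below, which shares blob index cbcf6c3) simply prints one slave hostname per line. For illustration, rendering it with the stock jinja2 package and made-up hostnames:

from jinja2 import Template

t = Template("{% for host in slave_hosts %}{{host}}\n{% endfor %}")
print(t.render(slave_hosts=['dn01.example.com', 'dn02.example.com']), end='')
# dn01.example.com
# dn02.example.com
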
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/log4j.properties.j2
deleted file mode 100644
index 6c02292..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/log4j.properties.j2
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-{% if is_jtnode_master or is_rmnode_master %}
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File={{yarn_log_dir_prefix}}/{{yarn_user}}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-{% endif %}
-
-{{rca_prefix}}ambari.jobhistory.database={{ambari_db_rca_url}}
-{{rca_prefix}}ambari.jobhistory.driver={{ambari_db_rca_driver}}
-{{rca_prefix}}ambari.jobhistory.user={{ambari_db_rca_username}}
-{{rca_prefix}}ambari.jobhistory.password={{ambari_db_rca_password}}
-{{rca_prefix}}ambari.jobhistory.logger=DEBUG,JHA
-
-{{rca_prefix}}log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-{{rca_prefix}}log4j.appender.JHA.database=${ambari.jobhistory.database}
-{{rca_prefix}}log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-{{rca_prefix}}log4j.appender.JHA.user=${ambari.jobhistory.user}
-{{rca_prefix}}log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-{{rca_prefix}}log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-{{rca_prefix}}log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/slaves.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/slaves.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
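Every line of the RCA (job-history logging) block above is prefixed with {{rca_prefix}}. The apparent intent, an assumption from how the rendered file behaves rather than anything visible in this diff, is that the prefix renders empty when RCA is enabled and as a comment marker when it is not, so one template serves both cases:

# Hypothetical illustration of the {{rca_prefix}} convention; the actual
# prefix values come from the stack's params, not from this snippet.
def render_rca(line, rca_enabled):
    rca_prefix = '' if rca_enabled else '###'   # assumed disabled-marker
    return rca_prefix + line

print(render_rca('ambari.jobhistory.logger=DEBUG,JHA', rca_enabled=False))
# ###ambari.jobhistory.logger=DEBUG,JHA
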
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/snmpd.conf.j2
deleted file mode 100644
index 3530444..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/snmpd.conf.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  {{snmp_source}}  {{snmp_community}}
-group   notConfigGroup v1   notConfigUser
-group   notConfigGroup v2c  notConfigUser
-view    systemview included .1
-access  notConfigGroup ""  any  noauth  exact  systemview none none
-
-syslocation Hadoop
-syscontact HadoopMaster
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-
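The snmpd.conf line "disk / 10000" asks the SNMP daemon to raise an error flag once the root filesystem drops below 10000 kB free (the commented "load 12 14 14" would do the same for load averages). A small Python check in the same spirit, illustrative only:

import os

def root_disk_low(threshold_kb=10000):
    # Mirrors snmpd's "disk / 10000": flag when free space available to
    # unprivileged users falls below the threshold, in kilobytes.
    st = os.statvfs('/')
    free_kb = st.f_bavail * st.f_frsize // 1024
    return free_kb < threshold_kb
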
http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/taskcontroller.cfg.j2
deleted file mode 100644
index d01d37e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/hooks/before-START/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements. See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership. The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License. You may obtain a copy of the License at
-# *
-# * http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/metainfo.xml
deleted file mode 100644
index 45a63e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-      <active>false</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/repos/repoinfo.xml
deleted file mode 100644
index 09895f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos6</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos5</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos6</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos5</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos6</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/centos5</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/suse11</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-2.0.8.0/repos/suse11</baseurl>
-      <repoid>HDP-2.0._</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ef81b392/ambari-server/src/main/resources/stacks/HDP/2.0._/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0._/role_command_order.json
deleted file mode 100644
index 84610ca..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/role_command_order.json
+++ /dev/null
@@ -1,100 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
-    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
-    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
-    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
-    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
-    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
-    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "FLUME_SERVER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_SERVER-START"],
-    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
-    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
-    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
-    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
-    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
-    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
-    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
-    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
-    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
-    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
-    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
-    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
-    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
-    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
-    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
-    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
-    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
-    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
-    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
-  },
-  "_comment" : "GLUSTERFS-specific dependencies",
-  "optional_glusterfs": {
-    "HBASE_MASTER-START": ["PEERSTATUS-START"],
-    "JOBTRACKER-START": ["PEERSTATUS-START"],
-    "TASKTRACKER-START": ["PEERSTATUS-START"],
-    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
-    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
-    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
-    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
-    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
-    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
-    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HIVE_SERVER-START": ["DATANODE-START"],
-    "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
-        "SECONDARY_NAMENODE-START"],
-    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
-        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
-    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
-        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
-        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
-    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
-    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
-    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
-  },
-  "_comment" : "Dependencies that are used in HA NameNode cluster",
-  "optional_ha": {
-    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
-  }
-}
-