ambari-commits mailing list archives

From aonis...@apache.org
Subject [41/50] [abbrv] ambari git commit: Merge remote-tracking branch 'remotes/origin/trunk' into branch-3.0-perf
Date Thu, 28 Sep 2017 13:25:30 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
index 73c1c8f,e66ec3c..d28920b
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
@@@ -166,11 -166,8 +166,8 @@@ rm_hosts = config['clusterHostInfo']['r
  rm_host = rm_hosts[0]
  rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
  rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
- # TODO UPGRADE default, update site during upgrade
- rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
- rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
  
 -java64_home = config['hostLevelParams']['java_home']
 +java64_home = config['ambariLevelParams']['java_home']
  java_exec = format("{java64_home}/bin/java")
  hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
  
@@@ -238,6 -235,16 +235,16 @@@ user_group = config['configurations']['
  #exclude file
  exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
  exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+ rm_nodes_exclude_dir = os.path.dirname(exclude_file_path)
+ 
 -nm_hosts = default("/clusterHostInfo/nm_hosts", [])
++nm_hosts = default("/clusterHostInfo/nodemanager_hosts", [])
+ #include file
+ include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+ include_hosts = None
+ manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+ if include_file_path and manage_include_files:
+   rm_nodes_include_dir = os.path.dirname(include_file_path)
+   include_hosts = list(set(nm_hosts) - set(exclude_hosts))
  
  ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
  has_ats = not len(ats_host) == 0
@@@ -247,6 -253,9 +253,9 @@@
  # don't use len(nm_hosts) here, because the check can take too much time on large clusters
  number_of_nm = 1
  
 -hs_host = default("/clusterHostInfo/hs_host", [])
++hs_host = default("/clusterHostInfo/historyserver_hosts", [])
+ has_hs = not len(hs_host) == 0
+ 
  # default kinit commands
  rm_kinit_cmd = ""
  yarn_timelineservice_kinit_cmd = ""
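
For reference, the decommission handling this hunk settles on (the exclude file is always managed, the include file only when manage.include.files is set) reduces to the standalone sketch below. The host lists and the stubbed default() are illustrative only, not part of the commit:

    import os

    # stub for resource_management's default(); there is no live cluster
    # config in this sketch, so it always returns the fallback
    def default(path, fallback):
        return fallback

    exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", ["nm2.example.com"])
    nm_hosts = default("/clusterHostInfo/nodemanager_hosts",
                       ["nm1.example.com", "nm2.example.com", "nm3.example.com"])

    exclude_file_path = default(
        "/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",
        "/etc/hadoop/conf/yarn.exclude")
    rm_nodes_exclude_dir = os.path.dirname(exclude_file_path)  # /etc/hadoop/conf

    include_file_path = default(
        "/configurations/yarn-site/yarn.resourcemanager.nodes.include-path",
        "/etc/hadoop/conf/yarn.include")
    manage_include_files = default("/configurations/yarn-site/manage.include.files", True)

    include_hosts = None
    if include_file_path and manage_include_files:
        rm_nodes_include_dir = os.path.dirname(include_file_path)
        # the include list is every NodeManager minus the decommissioned ones
        include_hosts = list(set(nm_hosts) - set(exclude_hosts))

    print(sorted(include_hosts))  # ['nm1.example.com', 'nm3.example.com']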

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
index 0000000,0fdc27c..f8af7bb
mode 000000,100644..100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
@@@ -1,0 -1,522 +1,522 @@@
+ #!/usr/bin/env python
+ """
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+     http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ 
+ """
+ 
+ import glob
+ import os
+ 
+ from resource_management.core import shell, sudo
+ from resource_management.core.logger import Logger
+ from resource_management.core.resources import Directory
+ from resource_management.core.resources.system import Execute, File
+ from resource_management.core.source import InlineTemplate
+ from resource_management.libraries import XmlConfig
+ from resource_management.libraries.functions import StackFeature
+ from resource_management.libraries.functions import get_kinit_path
+ from resource_management.libraries.functions import stack_select
+ from resource_management.libraries.functions.check_process_status import check_process_status
+ from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.format import format
+ from resource_management.libraries.functions.stack_features import check_stack_feature
+ from resource_management.libraries.functions.version import format_stack_version
+ from resource_management.libraries.script.script import Script
+ 
+ 
+ class Master(Script):
+   def install(self, env):
+     import params
+     env.set_params(params)
+     self.install_packages(env)
+ 
+     self.create_zeppelin_log_dir(env)
+ 
+     if params.spark_version:
+       Execute('echo spark_version:' + str(params.spark_version) + ' detected for spark_home: '
+               + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+     if params.spark2_version:
+       Execute('echo spark2_version:' + str(params.spark2_version) + ' detected for spark2_home: '
+               + params.spark2_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+ 
+   def create_zeppelin_dir(self, params):
+     params.HdfsResource(format("/user/{zeppelin_user}"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+     params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+     params.HdfsResource(format("/apps/zeppelin"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+ 
+     spark_deps_full_path = self.get_zeppelin_spark_dependencies()[0]
+     spark_dep_file_name = os.path.basename(spark_deps_full_path)
+ 
+     params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
+                         type="file",
+                         action="create_on_execute",
+                         source=spark_deps_full_path,
+                         group=params.zeppelin_group,
+                         owner=params.zeppelin_user,
+                         mode=0444,
+                         replace_existing_files=True,
+                         )
+ 
+     params.HdfsResource(None, action="execute")
+ 
+   def create_zeppelin_log_dir(self, env):
+     import params
+     env.set_params(params)
+     Directory([params.zeppelin_log_dir],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+               )
+ 
+   def create_zeppelin_hdfs_conf_dir(self, env):
+     import params
+     env.set_params(params)
+     Directory([params.external_dependency_conf],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+               )
+ 
+   def chown_zeppelin_pid_dir(self, env):
+     import params
+     env.set_params(params)
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), params.zeppelin_pid_dir),
+             sudo=True)
+ 
+   def configure(self, env):
+     import params
+     import status_params
+     env.set_params(params)
+     env.set_params(status_params)
+     self.create_zeppelin_log_dir(env)
+ 
+     # create the pid and zeppelin dirs
+     Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+     )
+     self.chown_zeppelin_pid_dir(env)
+ 
+     # write out zeppelin-site.xml
+     XmlConfig("zeppelin-site.xml",
+               conf_dir=params.conf_dir,
+               configurations=params.config['configurations']['zeppelin-config'],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group
+               )
+     # write out zeppelin-env.sh
+     env_content = InlineTemplate(params.zeppelin_env_content)
+     File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     # write out shiro.ini
+     shiro_ini_content = InlineTemplate(params.shiro_ini_content)
+     File(format("{params.conf_dir}/shiro.ini"), content=shiro_ini_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     # write out log4j.properties
+     File(format("{params.conf_dir}/log4j.properties"), content=params.log4j_properties_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     self.create_zeppelin_hdfs_conf_dir(env)
+ 
+     if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
+       # copy hbase-site.xml
+       XmlConfig("hbase-site.xml",
+               conf_dir=params.external_dependency_conf,
+               configurations=params.config['configurations']['hbase-site'],
 -              configuration_attributes=params.config['configuration_attributes']['hbase-site'],
++              configuration_attributes=params.config['configurationAttributes']['hbase-site'],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               mode=0644)
+ 
+       XmlConfig("hdfs-site.xml",
+                 conf_dir=params.external_dependency_conf,
+                 configurations=params.config['configurations']['hdfs-site'],
 -                configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
++                configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
+                 owner=params.zeppelin_user,
+                 group=params.zeppelin_group,
+                 mode=0644)
+ 
+       XmlConfig("core-site.xml",
+                 conf_dir=params.external_dependency_conf,
+                 configurations=params.config['configurations']['core-site'],
 -                configuration_attributes=params.config['configuration_attributes']['core-site'],
++                configuration_attributes=params.config['configurationAttributes']['core-site'],
+                 owner=params.zeppelin_user,
+                 group=params.zeppelin_group,
+                 mode=0644)
+ 
+   def check_and_copy_notebook_in_hdfs(self, params):
+     if params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir'].startswith("/"):
+       notebook_directory = params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+     else:
+       notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
+                            params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+ 
+     kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+     kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+ 
+     notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
+                                            user=params.zeppelin_user)[1]
+ 
+     # if there is no Kerberos setup, the string will contain "-bash: kinit: command not found"
+     if "\n" in notebook_directory_exists:
+       notebook_directory_exists = notebook_directory_exists.split("\n")[1]
+ 
+     # '1' means it does not exist
+     if notebook_directory_exists == '1':
+       # hdfs dfs -mkdir {notebook_directory}
+       params.HdfsResource(format("{notebook_directory}"),
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.zeppelin_user,
+                           recursive_chown=True,
+                           recursive_chmod=True
+                           )
+ 
+       # hdfs dfs -put /usr/hdp/current/zeppelin-server/notebook/ {notebook_directory}
+       params.HdfsResource(format("{notebook_directory}"),
+                             type="directory",
+                             action="create_on_execute",
+                             source=params.notebook_dir,
+                             owner=params.zeppelin_user,
+                             recursive_chown=True,
+                             recursive_chmod=True
+                             )
+ 
+ 
+   def stop(self, env, upgrade_type=None):
+     import params
+     self.create_zeppelin_log_dir(env)
+     self.chown_zeppelin_pid_dir(env)
+     Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+             user=params.zeppelin_user)
+ 
+   def start(self, env, upgrade_type=None):
+     import params
+     import status_params
+     self.configure(env)
+ 
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), "/etc/zeppelin"),
+             sudo=True)
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"),
+              os.path.join(params.zeppelin_dir, "notebook")), sudo=True)
+ 
+     if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
+         and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
+       self.check_and_copy_notebook_in_hdfs(params)
+ 
+     if params.security_enabled:
+         zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
+         Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)
+ 
+     zeppelin_spark_dependencies = self.get_zeppelin_spark_dependencies()
+     if zeppelin_spark_dependencies and os.path.exists(zeppelin_spark_dependencies[0]):
+       self.create_zeppelin_dir(params)
+ 
+     # if first_setup:
+     if not glob.glob(params.conf_dir + "/interpreter.json") and \
+       not os.path.exists(params.conf_dir + "/interpreter.json"):
+       self.create_interpreter_json()
+       self.update_zeppelin_interpreter()
+ 
+     if params.zeppelin_interpreter_config_upgrade == True:
+       self.reset_interpreter_settings()
+       self.update_zeppelin_interpreter()
+ 
+     Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh restart >> '
+             + params.zeppelin_log_file, user=params.zeppelin_user)
+     pidfile = glob.glob(os.path.join(status_params.zeppelin_pid_dir,
+                                      'zeppelin-' + params.zeppelin_user + '*.pid'))[0]
+     Logger.info(format("Pid file is: {pidfile}"))
+ 
+   def status(self, env):
+     import status_params
+     env.set_params(status_params)
+ 
+     try:
+         pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-' +
+                              status_params.zeppelin_user + '*.pid')[0]
+     except IndexError:
+         pid_file = ''
+     check_process_status(pid_file)
+ 
+   def reset_interpreter_settings(self):
+     import json
+     import interpreter_json_template
+     interpreter_json_template = json.loads(interpreter_json_template.template)['interpreterSettings']
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+ 
+     for setting_key in interpreter_json_template.keys():
+       if setting_key not in interpreter_settings:
+         interpreter_settings[setting_key] = interpreter_json_template[
+           setting_key]
+ 
+     self.set_interpreter_settings(config_data)
+ 
+   def get_interpreter_settings(self):
+     import params
+     import json
+ 
+     interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+     config_content = sudo.read_file(interpreter_config)
+     config_data = json.loads(config_content)
+     return config_data
+ 
+   def pre_upgrade_restart(self, env, upgrade_type=None):
+     Logger.info("Executing Stack Upgrade pre-restart")
+     import params
+     env.set_params(params)
+ 
+     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
+       stack_select.select_packages(params.version)
+ 
+   def set_interpreter_settings(self, config_data):
+     import params
+     import json
+ 
+     interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+     File(interpreter_config,
+          group=params.zeppelin_group,
+          owner=params.zeppelin_user,
+          content=json.dumps(config_data, indent=2)
+          )
+ 
+   def update_kerberos_properties(self):
+     import params
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+     for interpreter_setting in interpreter_settings:
+       interpreter = interpreter_settings[interpreter_setting]
+       if interpreter['group'] == 'livy' and params.livy_livyserver_host:
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.livy.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.livy.keytab'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['zeppelin.livy.principal'] = ""
+           interpreter['properties']['zeppelin.livy.keytab'] = ""
+       elif interpreter['group'] == 'spark':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['spark.yarn.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['spark.yarn.keytab'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['spark.yarn.principal'] = ""
+           interpreter['properties']['spark.yarn.keytab'] = ""
+       elif interpreter['group'] == 'jdbc':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.jdbc.auth.type'] = "KERBEROS"
+           interpreter['properties']['zeppelin.jdbc.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
+           if params.zookeeper_znode_parent \
+               and params.hbase_zookeeper_quorum \
+               and 'phoenix.url' in interpreter['properties'] \
+               and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
+             interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                        params.hbase_zookeeper_quorum + ':' + \
+                                                        params.zookeeper_znode_parent
+         else:
+           interpreter['properties']['zeppelin.jdbc.auth.type'] = "SIMPLE"
+           interpreter['properties']['zeppelin.jdbc.principal'] = ""
+           interpreter['properties']['zeppelin.jdbc.keytab.location'] = ""
+       elif interpreter['group'] == 'sh':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.shell.auth.type'] = "KERBEROS"
+           interpreter['properties']['zeppelin.shell.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.shell.keytab.location'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['zeppelin.shell.auth.type'] = ""
+           interpreter['properties']['zeppelin.shell.principal'] = ""
+           interpreter['properties']['zeppelin.shell.keytab.location'] = ""
+ 
+     self.set_interpreter_settings(config_data)
+ 
+   def update_zeppelin_interpreter(self):
+     import params
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+ 
+     if 'spark2-defaults' in params.config['configurations']:
+       spark2_config = self.get_spark2_interpreter_config()
+       config_id = spark2_config["id"]
+       interpreter_settings[config_id] = spark2_config
+ 
+     if params.livy2_livyserver_host:
+       livy2_config = self.get_livy2_interpreter_config()
+       config_id = livy2_config["id"]
+       interpreter_settings[config_id] = livy2_config
+ 
+     if params.zeppelin_interpreter:
+       settings_to_delete = []
+       for settings_key, interpreter in interpreter_settings.items():
+         if interpreter['group'] not in params.zeppelin_interpreter:
+           settings_to_delete.append(settings_key)
+ 
+       for key in settings_to_delete:
+         del interpreter_settings[key]
+ 
+     hive_interactive_properties_key = 'hive_interactive'
+     for setting_key in interpreter_settings.keys():
+       interpreter = interpreter_settings[setting_key]
+       if interpreter['group'] == 'jdbc':
+         interpreter['dependencies'] = []
+ 
+         if not params.hive_server_host and params.hive_server_interactive_hosts:
+           hive_interactive_properties_key = 'hive'
+ 
+         if params.hive_server_host:
+           interpreter['properties']['hive.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['hive.user'] = 'hive'
+           interpreter['properties']['hive.password'] = ''
+           interpreter['properties']['hive.proxy.user.property'] = 'hive.server2.proxy.user'
+           if params.hive_server2_support_dynamic_service_discovery:
+             interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                                                  params.hive_zookeeper_quorum + \
+                                                  '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                                                     params.hive_zookeeper_namespace
+           else:
+             interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                                                  params.hive_server_host + \
+                                                      ':' + params.hive_server_port
+         if params.hive_server_interactive_hosts:
+           interpreter['properties'][hive_interactive_properties_key + '.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties'][hive_interactive_properties_key + '.user'] = 'hive'
+           interpreter['properties'][hive_interactive_properties_key + '.password'] = ''
+           interpreter['properties'][hive_interactive_properties_key + '.proxy.user.property'] = 'hive.server2.proxy.user'
+           if params.hive_server2_support_dynamic_service_discovery:
+             interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                                                     params.hive_zookeeper_quorum + \
+                                                     '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                                                     params.hive_interactive_zookeeper_namespace
+           else:
+             interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                                                     params.hive_server_interactive_hosts + \
+                                                     ':' + params.hive_server_port
+ 
+         if params.spark_thrift_server_hosts:
+           interpreter['properties']['spark.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['spark.user'] = 'hive'
+           interpreter['properties']['spark.password'] = ''
+           interpreter['properties']['spark.proxy.user.property'] = 'hive.server2.proxy.user'
+           interpreter['properties']['spark.url'] = 'jdbc:hive2://' + \
+               params.spark_thrift_server_hosts + ':' + params.spark_hive_thrift_port + '/'
+           if params.spark_hive_principal:
+             interpreter['properties']['spark.url'] += ';principal=' + params.spark_hive_principal
+ 
+         if params.spark2_thrift_server_hosts:
+           interpreter['properties']['spark2.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['spark2.user'] = 'hive'
+           interpreter['properties']['spark2.password'] = ''
+           interpreter['properties']['spark2.proxy.user.property'] = 'hive.server2.proxy.user'
+           interpreter['properties']['spark2.url'] = 'jdbc:hive2://' + \
+               params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/'
+           if params.spark_hive_principal:
+             interpreter['properties']['spark2.url'] += ';principal=' + params.spark2_hive_principal
+ 
+         if params.zookeeper_znode_parent \
+                 and params.hbase_zookeeper_quorum:
+             interpreter['properties']['phoenix.driver'] = 'org.apache.phoenix.jdbc.PhoenixDriver'
+             interpreter['properties']['phoenix.hbase.client.retries.number'] = '1'
+             interpreter['properties']['phoenix.user'] = 'phoenixuser'
+             interpreter['properties']['phoenix.password'] = ''
+             interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                     params.hbase_zookeeper_quorum + ':' + \
+                                                     params.zookeeper_znode_parent
+ 
+       elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy':
+         if params.livy_livyserver_host:
+           interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy_livyserver_host + \
+                                                            ":" + params.livy_livyserver_port
+         else:
+           del interpreter_settings[setting_key]
+ 
+       elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy2':
+         if params.livy2_livyserver_host:
+           interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy2_livyserver_host + \
+                                                            ":" + params.livy2_livyserver_port
+         else:
+           del interpreter_settings[setting_key]
+ 
+ 
+       elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark':
+         if 'spark-env' in params.config['configurations']:
+           interpreter['properties']['master'] = "yarn-client"
+           interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark-client/"
+         else:
+           del interpreter_settings[setting_key]
+ 
+       elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark2':
+         if 'spark2-env' in params.config['configurations']:
+           interpreter['properties']['master'] = "yarn-client"
+           interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark2-client/"
+         else:
+           del interpreter_settings[setting_key]
+ 
+     self.set_interpreter_settings(config_data)
+     self.update_kerberos_properties()
+ 
+   def create_interpreter_json(self):
+     import interpreter_json_template
+     import params
+ 
+     interpreter_json = interpreter_json_template.template
+     File(format("{params.conf_dir}/interpreter.json"), content=interpreter_json,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+   def get_zeppelin_spark_dependencies(self):
+     import params
+     return glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies*.jar')
+ 
+   def get_spark2_interpreter_config(self):
+     import spark2_config_template
+     import json
+ 
+     return json.loads(spark2_config_template.template)
+ 
+   def get_livy2_interpreter_config(self):
+     import livy2_config_template
+     import json
+ 
+     return json.loads(livy2_config_template.template)
+ 
+ if __name__ == "__main__":
+   Master().execute()
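
The conflict resolutions in this file all follow one pattern: on branch-3.0 the command dictionary exposes attribute metadata under configurationAttributes instead of trunk's configuration_attributes. A hypothetical compatibility helper (not in this commit) that tolerates both spellings might look like:

    def get_site_attributes(config, site):
        # prefer the camelCase key used on branch-3.0, fall back to the
        # snake_case key produced by trunk-era commands
        attrs = config.get('configurationAttributes',
                           config.get('configuration_attributes', {}))
        return attrs.get(site, {})

    # e.g. stands in for params.config['configurationAttributes']['hbase-site']
    command = {'configurationAttributes': {'hbase-site': {'final': {'hbase.rootdir': 'true'}}}}
    print(get_site_attributes(command, 'hbase-site'))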

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
index 0000000,3242f26..dd370bd
mode 000000,100644..100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
@@@ -1,0 -1,258 +1,258 @@@
+ #!/usr/bin/env python
+ """
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+     http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ 
+ """
+ 
+ import functools
+ import os
+ import re
+ from resource_management.libraries.functions import StackFeature
+ from resource_management.libraries.functions import conf_select
+ from resource_management.libraries.functions import get_kinit_path
+ from resource_management.libraries.functions import stack_select
+ from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.format import format
+ from resource_management.libraries.functions.get_stack_version import get_stack_version
+ from resource_management.libraries.functions.stack_features import check_stack_feature
+ from resource_management.libraries.functions.version import format_stack_version, get_major_version
+ from resource_management.libraries.resources.hdfs_resource import HdfsResource
+ from resource_management.libraries.script.script import Script
+ 
+ def get_port_from_url(address):
+   if address is not None:
+     return address.split(':')[-1]
+   else:
+     return address
+ 
+ def extract_spark_version(spark_home):
+   try:
+     with open(spark_home + "/RELEASE") as fline:
+       return re.search('Spark (\d\.\d).+', fline.readline().rstrip()).group(1)
+   except:
+     pass
+   return None
+ 
+ 
+ # server configurations
+ config = Script.get_config()
+ stack_root = Script.get_stack_root()
+ 
+ # e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+ service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+ 
+ zeppelin_dirname = 'zeppelin-server'
+ 
+ install_dir = os.path.join(stack_root, "current")
+ executor_mem = config['configurations']['zeppelin-env']['zeppelin.executor.mem']
+ executor_instances = config['configurations']['zeppelin-env'][
+   'zeppelin.executor.instances']
+ 
+ security_enabled = config['configurations']['cluster-env']['security_enabled']
+ 
+ spark_jar_dir = config['configurations']['zeppelin-env']['zeppelin.spark.jar.dir']
+ spark_jar = format("{spark_jar_dir}/zeppelin-spark-0.5.5-SNAPSHOT.jar")
+ setup_view = True
+ temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+ 
+ spark_home = ""
+ spark_version = None
+ spark2_home = ""
+ spark2_version = None
+ if 'spark-defaults' in config['configurations']:
+   spark_home = os.path.join(stack_root, "current", 'spark-client')
+   spark_version = extract_spark_version(spark_home)
+ if 'spark2-defaults' in config['configurations']:
+   spark2_home = os.path.join(stack_root, "current", 'spark2-client')
+   spark2_version = extract_spark_version(spark2_home)
+ 
+ # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+ version = default("/commandParams/version", None)
 -stack_name = default("/hostLevelParams/stack_name", None)
++stack_name = default("/clusterLevelParams/stack_name", None)
+ 
+ # params from zeppelin-config
+ zeppelin_port = str(config['configurations']['zeppelin-config']['zeppelin.server.port'])
+ zeppelin_interpreter = None
+ if 'zeppelin.interpreter.group.order' in config['configurations']['zeppelin-config']:
+   zeppelin_interpreter = str(config['configurations']['zeppelin-config']
+                              ['zeppelin.interpreter.group.order']).split(",")
+ 
+ # params from zeppelin-env
+ zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+ zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+ zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+ zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+ zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+ zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+ 
+ zeppelin_dir = os.path.join(*[install_dir, zeppelin_dirname])
+ conf_dir = "/etc/zeppelin/conf"
+ external_dependency_conf = "/etc/zeppelin/conf/external-dependency-conf"
+ notebook_dir = os.path.join(*[install_dir, zeppelin_dirname, 'notebook'])
+ 
+ # zeppelin-env.sh
+ zeppelin_env_content = config['configurations']['zeppelin-env']['zeppelin_env_content']
+ 
+ # shiro.ini
+ shiro_ini_content = config['configurations']['zeppelin-shiro-ini']['shiro_ini_content']
+ 
+ # log4j.properties
+ log4j_properties_content = config['configurations']['zeppelin-log4j-properties']['log4j_properties_content']
+ 
+ # detect configs
+ master_configs = config['clusterHostInfo']
 -java64_home = config['hostLevelParams']['java_home']
++java64_home = config['ambariLevelParams']['java_home']
+ ambari_host = str(master_configs['ambari_server_host'][0])
+ zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+ ui_ssl_enabled = config['configurations']['zeppelin-config']['zeppelin.ssl']
+ 
+ # detect HS2 details, if installed
+ 
+ hive_server_host = None
+ hive_metastore_host = '0.0.0.0'
+ hive_metastore_port = None
+ hive_server_port = None
+ hive_zookeeper_quorum = None
+ hive_server2_support_dynamic_service_discovery = None
+ is_hive_installed = False
+ hive_zookeeper_namespace = None
+ hive_interactive_zookeeper_namespace = None
+ 
+ if 'hive_server_host' in master_configs and len(master_configs['hive_server_host']) != 0:
+   is_hive_installed = True
+   spark_hive_properties = {
+     'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
+   }
+   hive_server_host = str(master_configs['hive_server_host'][0])
+   hive_metastore_host = str(master_configs['hive_metastore_host'][0])
+   hive_metastore_port = str(
+     get_port_from_url(default('/configurations/hive-site/hive.metastore.uris', '')))
+   hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+   hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+   hive_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
+   hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+ 
+ hive_server_interactive_hosts = None
+ if 'hive_server_interactive_hosts' in master_configs and len(master_configs['hive_server_interactive_hosts']) != 0:
+     hive_server_interactive_hosts = str(master_configs['hive_server_interactive_hosts'][0])
+     hive_interactive_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
+     hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+     hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+     hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+ 
+ spark_thrift_server_hosts = None
+ spark_hive_thrift_port = None
+ spark_hive_principal = None
+ if 'spark_thriftserver_hosts' in master_configs and len(master_configs['spark_thriftserver_hosts']) != 0:
+   spark_thrift_server_hosts = str(master_configs['spark_thriftserver_hosts'][0])
+   if config['configurations']['spark-hive-site-override']:
+     spark_hive_thrift_port = config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']
+   if config['configurations']['spark-thrift-sparkconf'] and \
+       'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark-thrift-sparkconf']:
+     spark_hive_principal = config['configurations']['spark-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+ 
+ spark2_thrift_server_hosts = None
+ spark2_hive_thrift_port = None
+ spark2_hive_principal = None
+ if 'spark2_thriftserver_hosts' in master_configs and len(master_configs['spark2_thriftserver_hosts']) != 0:
+   spark2_thrift_server_hosts = str(master_configs['spark2_thriftserver_hosts'][0])
+   if config['configurations']['spark2-hive-site-override']:
+     spark2_hive_thrift_port = config['configurations']['spark2-hive-site-override']['hive.server2.thrift.port']
+   if config['configurations']['spark2-thrift-sparkconf'] and \
+       'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark2-thrift-sparkconf']:
+     spark2_hive_principal = config['configurations']['spark2-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+ 
+ 
+ # detect hbase details if installed
+ zookeeper_znode_parent = None
+ hbase_zookeeper_quorum = None
+ is_hbase_installed = False
+ if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+   is_hbase_installed = True
+   zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+   hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+ 
+ # detect spark queue
+ if 'spark-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+   spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+ elif 'spark2-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark2-defaults']:
+   spark_queue = config['configurations']['spark2-defaults']['spark.yarn.queue']
+ else:
+   spark_queue = 'default'
+ 
+ zeppelin_kerberos_keytab = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.keytab']
+ zeppelin_kerberos_principal = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.principal']
+ if 'zeppelin.interpreter.config.upgrade' in config['configurations']['zeppelin-config']:
+   zeppelin_interpreter_config_upgrade = config['configurations']['zeppelin-config']['zeppelin.interpreter.config.upgrade']
+ else:
+   zeppelin_interpreter_config_upgrade = False
+ 
+ # e.g. 2.3
 -stack_version_unformatted = config['hostLevelParams']['stack_version']
++stack_version_unformatted = config['clusterLevelParams']['stack_version']
+ 
+ # e.g. 2.3.0.0
+ stack_version_formatted = format_stack_version(stack_version_unformatted)
+ major_stack_version = get_major_version(stack_version_formatted)
+ 
+ # e.g. 2.3.0.0-2130
+ full_stack_version = default("/commandParams/version", None)
+ 
+ spark_client_version = get_stack_version('spark-client')
+ 
+ hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+ livy_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+ livy2_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
+ 
+ livy_livyserver_host = None
+ livy_livyserver_port = None
+ livy2_livyserver_host = None
+ livy2_livyserver_port = None
+ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and \
+     len(livy_hosts) > 0:
+   livy_livyserver_host = str(livy_hosts[0])
+   livy_livyserver_port = config['configurations']['livy-conf']['livy.server.port']
+ 
+ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and \
+     len(livy2_hosts) > 0:
+   livy2_livyserver_host = str(livy2_hosts[0])
+   livy2_livyserver_port = config['configurations']['livy2-conf']['livy.server.port']
+ 
+ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+ security_enabled = config['configurations']['cluster-env']['security_enabled']
+ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+ hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+ hdfs_site = config['configurations']['hdfs-site']
+ default_fs = config['configurations']['core-site']['fs.defaultFS']
+ 
+ # create a partial function that pre-binds the common arguments for every
+ # HdfsResource call; to create an HDFS directory, call params.HdfsResource in code
+ HdfsResource = functools.partial(
+   HdfsResource,
+   user=hdfs_user,
+   hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+   security_enabled=security_enabled,
+   keytab=hdfs_user_keytab,
+   kinit_path_local=kinit_path_local,
+   hadoop_bin_dir=hadoop_bin_dir,
+   hadoop_conf_dir=hadoop_conf_dir,
+   principal_name=hdfs_principal_name,
+   hdfs_site=hdfs_site,
+   default_fs=default_fs
+ )
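
The functools.partial at the end of this file pre-binds the cluster-wide arguments once, so call sites pass only the path and the action. A self-contained sketch of the same pattern, with a stub resource function and illustrative values:

    import functools

    def hdfs_resource(path, action=None, user=None, security_enabled=False, **kwargs):
        # stub: the real HdfsResource queues filesystem operations; this
        # just shows which arguments arrive pre-bound at every call site
        print("HdfsResource(%s) user=%s action=%s" % (path, user, action))

    # bind the common, cluster-wide arguments once...
    HdfsResource = functools.partial(hdfs_resource, user="hdfs", security_enabled=False)

    # ...so call sites look like the ones in master.py:
    HdfsResource("/user/zeppelin", action="create_on_execute")
    HdfsResource(None, action="execute")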

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
index 0000000,8bdfaec..e1b21b6
mode 000000,100644..100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
@@@ -1,0 -1,564 +1,564 @@@
+ #!/usr/bin/env python
+ """
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+     http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ 
+ """
+ 
+ import glob
+ import os
+ 
+ from resource_management.core import shell, sudo
+ from resource_management.core.logger import Logger
+ from resource_management.core.exceptions import Fail
+ from resource_management.core.resources import Directory
+ from resource_management.core.resources.system import Execute, File
+ from resource_management.core.source import InlineTemplate
+ from resource_management.libraries import XmlConfig
+ from resource_management.libraries.functions import StackFeature
+ from resource_management.libraries.functions import get_kinit_path
+ from resource_management.libraries.functions import stack_select
+ from resource_management.libraries.functions.check_process_status import check_process_status
+ from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.format import format
+ from resource_management.libraries.functions.stack_features import check_stack_feature
+ from resource_management.libraries.functions.version import format_stack_version
+ from resource_management.libraries.script.script import Script
+ 
+ 
+ class Master(Script):
+   def install(self, env):
+     import params
+     env.set_params(params)
+     self.install_packages(env)
+ 
+     self.create_zeppelin_log_dir(env)
+ 
+     if params.spark_version:
+       Execute('echo spark_version:' + str(params.spark_version) + ' detected for spark_home: '
+               + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+     if params.spark2_version:
+       Execute('echo spark2_version:' + str(params.spark2_version) + ' detected for spark2_home: '
+               + params.spark2_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+ 
+   def create_zeppelin_dir(self, params):
+     params.HdfsResource(format("/user/{zeppelin_user}"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+     params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+     params.HdfsResource(format("/apps/zeppelin"),
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.zeppelin_user,
+                         recursive_chown=True,
+                         recursive_chmod=True
+                         )
+ 
+     spark_deps_full_path = self.get_zeppelin_spark_dependencies()[0]
+     spark_dep_file_name = os.path.basename(spark_deps_full_path)
+ 
+     params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
+                         type="file",
+                         action="create_on_execute",
+                         source=spark_deps_full_path,
+                         group=params.zeppelin_group,
+                         owner=params.zeppelin_user,
+                         mode=0444,
+                         replace_existing_files=True,
+                         )
+ 
+     params.HdfsResource(None, action="execute")
+ 
+   def create_zeppelin_log_dir(self, env):
+     import params
+     env.set_params(params)
+     Directory([params.zeppelin_log_dir],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+               )
+ 
+   def create_zeppelin_hdfs_conf_dir(self, env):
+     import params
+     env.set_params(params)
+     Directory([params.external_dependency_conf],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+               )
+ 
+   def chown_zeppelin_pid_dir(self, env):
+     import params
+     env.set_params(params)
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), params.zeppelin_pid_dir),
+             sudo=True)
+ 
+   def configure(self, env):
+     import params
+     import status_params
+     env.set_params(params)
+     env.set_params(status_params)
+     self.create_zeppelin_log_dir(env)
+ 
+     # create the pid and zeppelin dirs
+     Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               cd_access="a",
+               create_parents=True,
+               mode=0755
+     )
+     self.chown_zeppelin_pid_dir(env)
+ 
+     # write out zeppelin-site.xml
+     XmlConfig("zeppelin-site.xml",
+               conf_dir=params.conf_dir,
+               configurations=params.config['configurations']['zeppelin-config'],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group
+               )
+     # write out zeppelin-env.sh
+     env_content = InlineTemplate(params.zeppelin_env_content)
+     File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     # write out shiro.ini
+     shiro_ini_content = InlineTemplate(params.shiro_ini_content)
+     File(format("{params.conf_dir}/shiro.ini"), content=shiro_ini_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     # write out log4j.properties
+     File(format("{params.conf_dir}/log4j.properties"), content=params.log4j_properties_content,
+          owner=params.zeppelin_user, group=params.zeppelin_group)
+ 
+     self.create_zeppelin_hdfs_conf_dir(env)
+ 
+     if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
+       # copy hbase-site.xml
+       XmlConfig("hbase-site.xml",
+               conf_dir=params.external_dependency_conf,
+               configurations=params.config['configurations']['hbase-site'],
 -              configuration_attributes=params.config['configuration_attributes']['hbase-site'],
++              configuration_attributes=params.config['configurationAttributes']['hbase-site'],
+               owner=params.zeppelin_user,
+               group=params.zeppelin_group,
+               mode=0644)
+ 
+       XmlConfig("hdfs-site.xml",
+                 conf_dir=params.external_dependency_conf,
+                 configurations=params.config['configurations']['hdfs-site'],
 -                configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
++                configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
+                 owner=params.zeppelin_user,
+                 group=params.zeppelin_group,
+                 mode=0644)
+ 
+       XmlConfig("core-site.xml",
+                 conf_dir=params.external_dependency_conf,
+                 configurations=params.config['configurations']['core-site'],
 -                configuration_attributes=params.config['configuration_attributes']['core-site'],
++                configuration_attributes=params.config['configurationAttributes']['core-site'],
+                 owner=params.zeppelin_user,
+                 group=params.zeppelin_group,
+                 mode=0644)
+ 
+   def check_and_copy_notebook_in_hdfs(self, params):
+     if params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir'].startswith("/"):
+       notebook_directory = params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+     else:
+       notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
+                            params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+ 
+     kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+     kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+ 
+     notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
+                                            user=params.zeppelin_user)[1]
+ 
+     # if there is no Kerberos setup, the string will contain "-bash: kinit: command not found"
+     if "\n" in notebook_directory_exists:
+       notebook_directory_exists = notebook_directory_exists.split("\n")[1]
+ 
+     # '1' means it does not exist
+     if notebook_directory_exists == '1':
+       # hdfs dfs -mkdir {notebook_directory}
+       params.HdfsResource(format("{notebook_directory}"),
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.zeppelin_user,
+                           recursive_chown=True,
+                           recursive_chmod=True
+                           )
+ 
+       # hdfs dfs -put /usr/hdp/current/zeppelin-server/notebook/ {notebook_directory}
+       params.HdfsResource(format("{notebook_directory}"),
+                             type="directory",
+                             action="create_on_execute",
+                             source=params.notebook_dir,
+                             owner=params.zeppelin_user,
+                             recursive_chown=True,
+                             recursive_chmod=True
+                             )
+ 
+ 
+   def stop(self, env, upgrade_type=None):
+     import params
+     self.create_zeppelin_log_dir(env)
+     self.chown_zeppelin_pid_dir(env)
+     Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+             user=params.zeppelin_user)
+ 
+   def start(self, env, upgrade_type=None):
+     import params
+     import status_params
+     self.configure(env)
+ 
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), "/etc/zeppelin"),
+             sudo=True)
+     Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"),
+              os.path.join(params.zeppelin_dir, "notebook")), sudo=True)
+ 
+     if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
+         and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
+       self.check_and_copy_notebook_in_hdfs(params)
+ 
+     if params.security_enabled:
+         zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
+         Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)
+ 
+     zeppelin_spark_dependencies = self.get_zeppelin_spark_dependencies()
+     if zeppelin_spark_dependencies and os.path.exists(zeppelin_spark_dependencies[0]):
+       self.create_zeppelin_dir(params)
+ 
+     # if first_setup:
+     if not glob.glob(params.conf_dir + "/interpreter.json") and \
+       not os.path.exists(params.conf_dir + "/interpreter.json"):
+       self.create_interpreter_json()
+       self.update_zeppelin_interpreter()
+ 
+     if params.zeppelin_interpreter_config_upgrade == True:
+       self.reset_interpreter_settings()
+       self.update_zeppelin_interpreter()
+ 
+     Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh restart >> '
+             + params.zeppelin_log_file, user=params.zeppelin_user)
+     pidfile = glob.glob(os.path.join(status_params.zeppelin_pid_dir,
+                                      'zeppelin-' + params.zeppelin_user + '*.pid'))[0]
+     Logger.info(format("Pid file is: {pidfile}"))
+ 
+   def status(self, env):
+     import status_params
+     env.set_params(status_params)
+ 
+     try:
+       pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-' +
+                            status_params.zeppelin_user + '*.pid')[0]
+     except IndexError:
+       pid_file = ''
+     check_process_status(pid_file)
+ 
+   def reset_interpreter_settings(self):
+     import json
+     import interpreter_json_template
+     interpreter_json_template = json.loads(interpreter_json_template.template)['interpreterSettings']
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+ 
+     for setting_key in interpreter_json_template.keys():
+       if setting_key not in interpreter_settings:
+         interpreter_settings[setting_key] = interpreter_json_template[
+           setting_key]
+ 
+     self.set_interpreter_settings(config_data)
+ 
+   def pre_upgrade_restart(self, env, upgrade_type=None):
+     Logger.info("Executing Stack Upgrade pre-restart")
+     import params
+     env.set_params(params)
+ 
+     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
+       stack_select.select_packages(params.version)
+ 
+   def getZeppelinConfFS(self, params):
+     hdfs_interpreter_config = params.config['configurations']['zeppelin-config']['zeppelin.config.fs.dir'] + "/interpreter.json"
+ 
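+     # a relative zeppelin.config.fs.dir is resolved under the Zeppelin user's HDFS
+     # home directory, e.g. "conf" becomes "/user/zeppelin/conf/interpreter.json"
+     # (illustrative user name)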
+     if not hdfs_interpreter_config.startswith("/"):
+       hdfs_interpreter_config = "/user/" + format("{zeppelin_user}") + "/" + hdfs_interpreter_config
+ 
+     return hdfs_interpreter_config
+ 
+   def get_interpreter_settings(self):
+     import params
+     import json
+ 
+     interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+     if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
+       and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
+ 
+       if 'zeppelin.config.fs.dir' in params.config['configurations']['zeppelin-config']:
+         try:
+           # copy from hdfs to /etc/zeppelin/conf/interpreter.json
+           params.HdfsResource(interpreter_config,
+                               type="file",
+                               action="download_on_execute",
+                               source=self.getZeppelinConfFS(params),
+                               group=params.zeppelin_group,
+                               owner=params.zeppelin_user)
+         except Fail as fail:
+           if "doesn't exist" not in fail.args[0]:
+             Logger.error("Error getting interpreter.json from HDFS: " + str(fail.args))
+             raise
+ 
+     config_content = sudo.read_file(interpreter_config)
+     config_data = json.loads(config_content)
+     return config_data
+ 
+   def set_interpreter_settings(self, config_data):
+     import params
+     import json
+ 
+     interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+     File(interpreter_config,
+          group=params.zeppelin_group,
+          owner=params.zeppelin_user,
+          content=json.dumps(config_data, indent=2))
+ 
+     if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
+       and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
+ 
+       if 'zeppelin.config.fs.dir' in params.config['configurations']['zeppelin-config']:
+         params.HdfsResource(self.getZeppelinConfFS(params),
+                             type="file",
+                             action="create_on_execute",
+                             source=interpreter_config,
+                             group=params.zeppelin_group,
+                             owner=params.zeppelin_user,
+                             user=params.zeppelin_user,
+                             replace_existing_files=True)
+ 
+   def update_kerberos_properties(self):
+     import params
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+     for interpreter_setting in interpreter_settings:
+       interpreter = interpreter_settings[interpreter_setting]
+       if interpreter['group'] == 'livy' and params.livy_livyserver_host:
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.livy.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.livy.keytab'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['zeppelin.livy.principal'] = ""
+           interpreter['properties']['zeppelin.livy.keytab'] = ""
+       elif interpreter['group'] == 'spark':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['spark.yarn.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['spark.yarn.keytab'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['spark.yarn.principal'] = ""
+           interpreter['properties']['spark.yarn.keytab'] = ""
+       elif interpreter['group'] == 'jdbc':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.jdbc.auth.type'] = "KERBEROS"
+           interpreter['properties']['zeppelin.jdbc.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
+           if params.zookeeper_znode_parent \
+               and params.hbase_zookeeper_quorum \
+               and 'phoenix.url' in interpreter['properties'] \
+               and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
+             interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                        params.hbase_zookeeper_quorum + ':' + \
+                                                        params.zookeeper_znode_parent
+         else:
+           interpreter['properties']['zeppelin.jdbc.auth.type'] = "SIMPLE"
+           interpreter['properties']['zeppelin.jdbc.principal'] = ""
+           interpreter['properties']['zeppelin.jdbc.keytab.location'] = ""
+       elif interpreter['group'] == 'sh':
+         if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+           interpreter['properties']['zeppelin.shell.auth.type'] = "KERBEROS"
+           interpreter['properties']['zeppelin.shell.principal'] = params.zeppelin_kerberos_principal
+           interpreter['properties']['zeppelin.shell.keytab.location'] = params.zeppelin_kerberos_keytab
+         else:
+           interpreter['properties']['zeppelin.shell.auth.type'] = ""
+           interpreter['properties']['zeppelin.shell.principal'] = ""
+           interpreter['properties']['zeppelin.shell.keytab.location'] = ""
+ 
+     self.set_interpreter_settings(config_data)
+ 
+   def update_zeppelin_interpreter(self):
+     import params
+     config_data = self.get_interpreter_settings()
+     interpreter_settings = config_data['interpreterSettings']
+ 
+     if 'spark2-defaults' in params.config['configurations']:
+       spark2_config = self.get_spark2_interpreter_config()
+       config_id = spark2_config["id"]
+       interpreter_settings[config_id] = spark2_config
+ 
+     if params.livy2_livyserver_host:
+       livy2_config = self.get_livy2_interpreter_config()
+       config_id = livy2_config["id"]
+       interpreter_settings[config_id] = livy2_config
+ 
+     if params.zeppelin_interpreter:
+       settings_to_delete = []
+       for settings_key, interpreter in interpreter_settings.items():
+         if interpreter['group'] not in params.zeppelin_interpreter:
+           settings_to_delete.append(settings_key)
+ 
+       for key in settings_to_delete:
+         del interpreter_settings[key]
+ 
+     hive_interactive_properties_key = 'hive_interactive'
+     for setting_key in interpreter_settings.keys():
+       interpreter = interpreter_settings[setting_key]
+       if interpreter['group'] == 'jdbc':
+         interpreter['dependencies'] = []
+ 
+         if not params.hive_server_host and params.hive_server_interactive_hosts:
+           hive_interactive_properties_key = 'hive'
+ 
+         if params.hive_server_host:
+           interpreter['properties']['hive.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['hive.user'] = 'hive'
+           interpreter['properties']['hive.password'] = ''
+           interpreter['properties']['hive.proxy.user.property'] = 'hive.server2.proxy.user'
+           if params.hive_server2_support_dynamic_service_discovery:
+             interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                 params.hive_zookeeper_quorum + \
+                 '/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                 params.hive_zookeeper_namespace
+           else:
+             interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                 params.hive_server_host + ':' + params.hive_server_port
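+           # illustrative hive.url results (hypothetical hosts): with dynamic discovery,
+           # "jdbc:hive2://zk1:2181,zk2:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
+           # otherwise "jdbc:hive2://hive.example.com:10001"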
+         if params.hive_server_interactive_hosts:
+           interpreter['properties'][hive_interactive_properties_key + '.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties'][hive_interactive_properties_key + '.user'] = 'hive'
+           interpreter['properties'][hive_interactive_properties_key + '.password'] = ''
+           interpreter['properties'][hive_interactive_properties_key + '.proxy.user.property'] = 'hive.server2.proxy.user'
+           if params.hive_server2_support_dynamic_service_discovery:
+             interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                 params.hive_zookeeper_quorum + \
+                 '/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                 params.hive_interactive_zookeeper_namespace
+           else:
+             interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                 params.hive_server_interactive_hosts + ':' + params.hive_server_port
+ 
+         if params.spark_thrift_server_hosts:
+           interpreter['properties']['spark.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['spark.user'] = 'hive'
+           interpreter['properties']['spark.password'] = ''
+           interpreter['properties']['spark.proxy.user.property'] = 'hive.server2.proxy.user'
+           interpreter['properties']['spark.url'] = 'jdbc:hive2://' + \
+               params.spark_thrift_server_hosts + ':' + params.spark_hive_thrift_port + '/'
+           if params.spark_hive_principal:
+             interpreter['properties']['spark.url'] += ';principal=' + params.spark_hive_principal
+ 
+         if params.spark2_thrift_server_hosts:
+           interpreter['properties']['spark2.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+           interpreter['properties']['spark2.user'] = 'hive'
+           interpreter['properties']['spark2.password'] = ''
+           interpreter['properties']['spark2.proxy.user.property'] = 'hive.server2.proxy.user'
+           interpreter['properties']['spark2.url'] = 'jdbc:hive2://' + \
+               params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/'
+           if params.spark_hive_principal:
+             interpreter['properties']['spark2.url'] += ';principal=' + params.spark2_hive_principal
+ 
+         if params.zookeeper_znode_parent \
+                 and params.hbase_zookeeper_quorum:
+             interpreter['properties']['phoenix.driver'] = 'org.apache.phoenix.jdbc.PhoenixDriver'
+             interpreter['properties']['phoenix.hbase.client.retries.number'] = '1'
+             interpreter['properties']['phoenix.user'] = 'phoenixuser'
+             interpreter['properties']['phoenix.password'] = ''
+             interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                     params.hbase_zookeeper_quorum + ':' + \
+                                                     params.zookeeper_znode_parent
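+             # e.g. "jdbc:phoenix:zk1,zk2,zk3:/hbase-secure" (hypothetical quorum and znode)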
+ 
+       elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy':
+         if params.livy_livyserver_host:
+           interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy_livyserver_host + \
+                                                            ":" + params.livy_livyserver_port
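+           # e.g. "http://livy.example.com:8999" (hypothetical host and port)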
+         else:
+           del interpreter_settings[setting_key]
+ 
+       elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy2':
+         if params.livy2_livyserver_host:
+           interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy2_livyserver_host + \
+                                                            ":" + params.livy2_livyserver_port
+         else:
+           del interpreter_settings[setting_key]
+ 
+       elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark':
+         if 'spark-env' in params.config['configurations']:
+           interpreter['properties']['master'] = "yarn-client"
+           interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark-client/"
+         else:
+           del interpreter_settings[setting_key]
+ 
+       elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark2':
+         if 'spark2-env' in params.config['configurations']:
+           interpreter['properties']['master'] = "yarn-client"
+           interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark2-client/"
+         else:
+           del interpreter_settings[setting_key]
+ 
+     self.set_interpreter_settings(config_data)
+     self.update_kerberos_properties()
+ 
+   def create_interpreter_json(self):
+     import interpreter_json_template
+     import params
+ 
+     interpreter_json = interpreter_json_template.template
+     File(format("{params.conf_dir}/interpreter.json"),
+          content=interpreter_json,
+          owner=params.zeppelin_user,
+          group=params.zeppelin_group,
+          mode=0664)
+ 
+   def get_zeppelin_spark_dependencies(self):
+     import params
+     return glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies*.jar')
+ 
+   def get_spark2_interpreter_config(self):
+     import spark2_config_template
+     import json
+ 
+     return json.loads(spark2_config_template.template)
+ 
+   def get_livy2_interpreter_config(self):
+     import livy2_config_template
+     import json
+ 
+     return json.loads(livy2_config_template.template)
+ 
+ if __name__ == "__main__":
+   Master().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/params.py
index 0000000,3242f26..dd370bd
mode 000000,100644..100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/params.py
@@@ -1,0 -1,258 +1,258 @@@
+ #!/usr/bin/env python
+ """
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+     http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ 
+ """
+ 
+ import functools
+ import os
+ import re
+ from resource_management.libraries.functions import StackFeature
+ from resource_management.libraries.functions import conf_select
+ from resource_management.libraries.functions import get_kinit_path
+ from resource_management.libraries.functions import stack_select
+ from resource_management.libraries.functions.default import default
+ from resource_management.libraries.functions.format import format
+ from resource_management.libraries.functions.get_stack_version import get_stack_version
+ from resource_management.libraries.functions.stack_features import check_stack_feature
+ from resource_management.libraries.functions.version import format_stack_version, get_major_version
+ from resource_management.libraries.resources.hdfs_resource import HdfsResource
+ from resource_management.libraries.script.script import Script
+ 
+ def get_port_from_url(address):
+   if address is None:
+     return None
+   return address.split(':')[-1]
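+ # illustrative: get_port_from_url("thrift://metastore.example.com:9083") returns "9083"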
+ 
+ def extract_spark_version(spark_home):
+   try:
+     with open(spark_home + "/RELEASE") as fline:
+       return re.search(r'Spark (\d\.\d).+', fline.readline().rstrip()).group(1)
+   except Exception:
+     pass
+   return None
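+ # illustrative: a RELEASE file whose first line is "Spark 2.1.0 built for Hadoop 2.7.3" yields "2.1"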
+ 
+ 
+ # server configurations
+ config = Script.get_config()
+ stack_root = Script.get_stack_root()
+ 
+ # e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+ service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+ 
+ zeppelin_dirname = 'zeppelin-server'
+ 
+ install_dir = os.path.join(stack_root, "current")
+ executor_mem = config['configurations']['zeppelin-env']['zeppelin.executor.mem']
+ executor_instances = config['configurations']['zeppelin-env'][
+   'zeppelin.executor.instances']
+ 
+ security_enabled = config['configurations']['cluster-env']['security_enabled']
+ 
+ spark_jar_dir = config['configurations']['zeppelin-env']['zeppelin.spark.jar.dir']
+ spark_jar = format("{spark_jar_dir}/zeppelin-spark-0.5.5-SNAPSHOT.jar")
+ setup_view = True
+ temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+ 
+ spark_home = ""
+ spark_version = None
+ spark2_home = ""
+ spark2_version = None
+ if 'spark-defaults' in config['configurations']:
+   spark_home = os.path.join(stack_root, "current", 'spark-client')
+   spark_version = extract_spark_version(spark_home)
+ if 'spark2-defaults' in config['configurations']:
+   spark2_home = os.path.join(stack_root, "current", 'spark2-client')
+   spark2_version = extract_spark_version(spark2_home)
+ 
+ # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+ version = default("/commandParams/version", None)
 -stack_name = default("/hostLevelParams/stack_name", None)
++stack_name = default("/clusterLevelParams/stack_name", None)
+ 
+ # params from zeppelin-config
+ zeppelin_port = str(config['configurations']['zeppelin-config']['zeppelin.server.port'])
+ zeppelin_interpreter = None
+ if 'zeppelin.interpreter.group.order' in config['configurations']['zeppelin-config']:
+   zeppelin_interpreter = str(config['configurations']['zeppelin-config']
+                              ['zeppelin.interpreter.group.order']).split(",")
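+ # e.g. a value of "spark,livy,jdbc" yields ['spark', 'livy', 'jdbc'] (illustrative ordering)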
+ 
+ # params from zeppelin-env
+ zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+ zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+ zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+ zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+ zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+ zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+ 
+ zeppelin_dir = os.path.join(*[install_dir, zeppelin_dirname])
+ conf_dir = "/etc/zeppelin/conf"
+ external_dependency_conf = "/etc/zeppelin/conf/external-dependency-conf"
+ notebook_dir = os.path.join(*[install_dir, zeppelin_dirname, 'notebook'])
+ 
+ # zeppelin-env.sh
+ zeppelin_env_content = config['configurations']['zeppelin-env']['zeppelin_env_content']
+ 
+ # shiro.ini
+ shiro_ini_content = config['configurations']['zeppelin-shiro-ini']['shiro_ini_content']
+ 
+ # log4j.properties
+ log4j_properties_content = config['configurations']['zeppelin-log4j-properties']['log4j_properties_content']
+ 
+ # detect configs
+ master_configs = config['clusterHostInfo']
 -java64_home = config['hostLevelParams']['java_home']
++java64_home = config['ambariLevelParams']['java_home']
+ ambari_host = str(master_configs['ambari_server_host'][0])
+ zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+ ui_ssl_enabled = config['configurations']['zeppelin-config']['zeppelin.ssl']
+ 
+ # detect HS2 details, if installed
+ 
+ hive_server_host = None
+ hive_metastore_host = '0.0.0.0'
+ hive_metastore_port = None
+ hive_server_port = None
+ hive_zookeeper_quorum = None
+ hive_server2_support_dynamic_service_discovery = None
+ is_hive_installed = False
+ hive_zookeeper_namespace = None
+ hive_interactive_zookeeper_namespace = None
+ 
+ if 'hive_server_host' in master_configs and len(master_configs['hive_server_host']) != 0:
+   is_hive_installed = True
+   spark_hive_properties = {
+     'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
+   }
+   hive_server_host = str(master_configs['hive_server_host'][0])
+   hive_metastore_host = str(master_configs['hive_metastore_host'][0])
+   hive_metastore_port = str(
+     get_port_from_url(default('/configurations/hive-site/hive.metastore.uris', '')))
+   hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+   hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+   hive_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
+   hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+ 
+ hive_server_interactive_hosts = None
+ if 'hive_server_interactive_hosts' in master_configs and len(master_configs['hive_server_interactive_hosts']) != 0:
+   hive_server_interactive_hosts = str(master_configs['hive_server_interactive_hosts'][0])
+   hive_interactive_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
+   hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+   hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+   hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+ 
+ spark_thrift_server_hosts = None
+ spark_hive_thrift_port = None
+ spark_hive_principal = None
+ if 'spark_thriftserver_hosts' in master_configs and len(master_configs['spark_thriftserver_hosts']) != 0:
+   spark_thrift_server_hosts = str(master_configs['spark_thriftserver_hosts'][0])
+   if config['configurations']['spark-hive-site-override']:
+     spark_hive_thrift_port = config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']
+   if config['configurations']['spark-thrift-sparkconf'] and \
+       'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark-thrift-sparkconf']:
+     spark_hive_principal = config['configurations']['spark-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+ 
+ spark2_thrift_server_hosts = None
+ spark2_hive_thrift_port = None
+ spark2_hive_principal = None
+ if 'spark2_thriftserver_hosts' in master_configs and len(master_configs['spark2_thriftserver_hosts']) != 0:
+   spark2_thrift_server_hosts = str(master_configs['spark2_thriftserver_hosts'][0])
+   if config['configurations']['spark2-hive-site-override']:
+     spark2_hive_thrift_port = config['configurations']['spark2-hive-site-override']['hive.server2.thrift.port']
+   if config['configurations']['spark2-thrift-sparkconf'] and \
+       'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark2-thrift-sparkconf']:
+     spark2_hive_principal = config['configurations']['spark2-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+ 
+ 
+ # detect hbase details if installed
+ zookeeper_znode_parent = None
+ hbase_zookeeper_quorum = None
+ is_hbase_installed = False
+ if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+   is_hbase_installed = True
+   zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+   hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+ 
+ # detect spark queue
+ if 'spark-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+   spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+ elif 'spark2-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark2-defaults']:
+   spark_queue = config['configurations']['spark2-defaults']['spark.yarn.queue']
+ else:
+   spark_queue = 'default'
+ 
+ zeppelin_kerberos_keytab = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.keytab']
+ zeppelin_kerberos_principal = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.principal']
+ if 'zeppelin.interpreter.config.upgrade' in config['configurations']['zeppelin-config']:
+   zeppelin_interpreter_config_upgrade = config['configurations']['zeppelin-config']['zeppelin.interpreter.config.upgrade']
+ else:
+   zeppelin_interpreter_config_upgrade = False
+ 
+ # e.g. 2.3
 -stack_version_unformatted = config['hostLevelParams']['stack_version']
++stack_version_unformatted = config['clusterLevelParams']['stack_version']
+ 
+ # e.g. 2.3.0.0
+ stack_version_formatted = format_stack_version(stack_version_unformatted)
+ major_stack_version = get_major_version(stack_version_formatted)
+ 
+ # e.g. 2.3.0.0-2130
+ full_stack_version = default("/commandParams/version", None)
+ 
+ spark_client_version = get_stack_version('spark-client')
+ 
+ hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+ livy_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+ livy2_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
+ 
+ livy_livyserver_host = None
+ livy_livyserver_port = None
+ livy2_livyserver_host = None
+ livy2_livyserver_port = None
+ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and \
+     len(livy_hosts) > 0:
+   livy_livyserver_host = str(livy_hosts[0])
+   livy_livyserver_port = config['configurations']['livy-conf']['livy.server.port']
+ 
+ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and \
+     len(livy2_hosts) > 0:
+   livy2_livyserver_host = str(livy2_hosts[0])
+   livy2_livyserver_port = config['configurations']['livy2-conf']['livy.server.port']
+ 
+ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+ security_enabled = config['configurations']['cluster-env']['security_enabled']
+ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+ hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+ hdfs_site = config['configurations']['hdfs-site']
+ default_fs = config['configurations']['core-site']['fs.defaultFS']
+ 
+ # create partial functions with common arguments for every HdfsResource call
+ # to create hdfs directory we need to call params.HdfsResource in code
+ HdfsResource = functools.partial(
+   HdfsResource,
+   user=hdfs_user,
+   hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+   security_enabled=security_enabled,
+   keytab=hdfs_user_keytab,
+   kinit_path_local=kinit_path_local,
+   hadoop_bin_dir=hadoop_bin_dir,
+   hadoop_conf_dir=hadoop_conf_dir,
+   principal_name=hdfs_principal_name,
+   hdfs_site=hdfs_site,
+   default_fs=default_fs
+ )
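+ # illustrative usage (hypothetical path):
+ #   HdfsResource("/user/zeppelin/notebook", type="directory",
+ #                action="create_on_execute", owner=zeppelin_user)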

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/params_linux.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/custom_actions/scripts/check_host.py
index 53fad7e,3ca2909..e610307
--- a/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
@@@ -110,10 -110,11 +110,11 @@@ class CheckHost(Script)
    IGNORE_REPOS = [
      "HDP-UTILS", "AMBARI", "BASE", "EXTRAS"
    ]
 -  
 +
    def __init__(self):
      self.reportFileHandler = HostCheckReportFileHandler()
+     self.pkg_provider = get_provider("Package")
 -  
 +
    def actionexecute(self, env):
      Logger.info("Host checks started.")
      config = Script.get_config()

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/custom_actions/scripts/update_repo.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 4f4386d,b517eba..f9490be
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@@ -39,11 -39,9 +39,9 @@@ host_sys_prepped = default("/ambariLeve
  
  sudo = AMBARI_SUDO_BINARY
  
 -stack_version_unformatted = config['hostLevelParams']['stack_version']
 +stack_version_unformatted = config['clusterLevelParams']['stack_version']
  stack_version_formatted = format_stack_version(stack_version_unformatted)
- 
- # current host stack version
- current_version = default("/hostLevelParams/current_version", None)
+ major_stack_version = get_major_version(stack_version_formatted)
  
  # service name
  service_name = config['serviceName']

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------

