ambari-commits mailing list archives

From vsai...@apache.org
Subject [2/5] ambari git commit: AMBARI-21901. Add 0.7.x stack definition for Zeppelin (Prabhjyot Singh via Venkata Sairam)
Date Mon, 18 Sep 2017 06:56:45 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
new file mode 100644
index 0000000..ba73d10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/master.py
@@ -0,0 +1,522 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import os
+
+from resource_management.core import shell, sudo
+from resource_management.core.logger import Logger
+from resource_management.core.resources import Directory
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries import XmlConfig
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.script.script import Script
+
+
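+# Command script for the ZEPPELIN_MASTER component: handles install, configure,
+# start/stop/status and interpreter.json maintenance on the Zeppelin server host.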
+class Master(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+    self.create_zeppelin_log_dir(env)
+
+    if params.spark_version:
+      Execute('echo spark_version:' + str(params.spark_version) + ' detected for spark_home: '
+              + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+    if params.spark2_version:
+      Execute('echo spark2_version:' + str(params.spark2_version) + ' detected for spark2_home: '
+              + params.spark2_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+
+  def create_zeppelin_dir(self, params):
+    params.HdfsResource(format("/user/{zeppelin_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/apps/zeppelin"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    spark_deps_full_path = self.get_zeppelin_spark_dependencies()[0]
+    spark_dep_file_name = os.path.basename(spark_deps_full_path)
+
+    params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
+                        type="file",
+                        action="create_on_execute",
+                        source=spark_deps_full_path,
+                        group=params.zeppelin_group,
+                        owner=params.zeppelin_user,
+                        mode=0444,
+                        replace_existing_files=True,
+                        )
+
+    params.HdfsResource(None, action="execute")
+
+  def create_zeppelin_log_dir(self, env):
+    import params
+    env.set_params(params)
+    Directory([params.zeppelin_log_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+              )
+
+  def create_zeppelin_hdfs_conf_dir(self, env):
+    import params
+    env.set_params(params)
+    Directory([params.external_dependency_conf],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+              )
+
+  def chown_zeppelin_pid_dir(self, env):
+    import params
+    env.set_params(params)
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), params.zeppelin_pid_dir),
+            sudo=True)
+
+  def configure(self, env):
+    import params
+    import status_params
+    env.set_params(params)
+    env.set_params(status_params)
+    self.create_zeppelin_log_dir(env)
+
+    # create the pid and zeppelin dirs
+    Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+    )
+    self.chown_zeppelin_pid_dir(env)
+
+    # write out zeppelin-site.xml
+    XmlConfig("zeppelin-site.xml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['zeppelin-config'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group
+              )
+    # write out zeppelin-env.sh
+    env_content = InlineTemplate(params.zeppelin_env_content)
+    File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    # write out shiro.ini
+    shiro_ini_content = InlineTemplate(params.shiro_ini_content)
+    File(format("{params.conf_dir}/shiro.ini"), content=shiro_ini_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    # write out log4j.properties
+    File(format("{params.conf_dir}/log4j.properties"), content=params.log4j_properties_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    self.create_zeppelin_hdfs_conf_dir(env)
+
+    if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
+      # copy hbase-site.xml
+      XmlConfig("hbase-site.xml",
+              conf_dir=params.external_dependency_conf,
+              configurations=params.config['configurations']['hbase-site'],
+              configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              mode=0644)
+
+      XmlConfig("hdfs-site.xml",
+                conf_dir=params.external_dependency_conf,
+                configurations=params.config['configurations']['hdfs-site'],
+                configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+                owner=params.zeppelin_user,
+                group=params.zeppelin_group,
+                mode=0644)
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.external_dependency_conf,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.zeppelin_user,
+                group=params.zeppelin_group,
+                mode=0644)
+
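+  # Ensure the configured zeppelin.notebook.dir exists in HDFS; if it does not,
+  # create it and seed it from the notebook directory shipped with the local
+  # Zeppelin installation.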
+  def check_and_copy_notebook_in_hdfs(self, params):
+    if params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir'].startswith("/"):
+      notebook_directory = params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+    else:
+      notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
+                           params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
+
+    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+
+    notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
+                                           user=params.zeppelin_user)[1]
+
+    # if there is no Kerberos setup, the output will also contain "-bash: kinit: command not found"
+    if "\n" in notebook_directory_exists:
+      notebook_directory_exists = notebook_directory_exists.split("\n")[1]
+
+    # '1' means the directory does not exist
+    if notebook_directory_exists == '1':
+      # hdfs dfs -mkdir {notebook_directory}
+      params.HdfsResource(format("{notebook_directory}"),
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.zeppelin_user,
+                          recursive_chown=True,
+                          recursive_chmod=True
+                          )
+
+      # hdfs dfs -put /usr/hdp/current/zeppelin-server/notebook/ {notebook_directory}
+      params.HdfsResource(format("{notebook_directory}"),
+                            type="directory",
+                            action="create_on_execute",
+                            source=params.notebook_dir,
+                            owner=params.zeppelin_user,
+                            recursive_chown=True,
+                            recursive_chmod=True
+                            )
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    self.create_zeppelin_log_dir(env)
+    self.chown_zeppelin_pid_dir(env)
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+            user=params.zeppelin_user)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    import status_params
+    self.configure(env)
+
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), "/etc/zeppelin"),
+            sudo=True)
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"),
+             os.path.join(params.zeppelin_dir, "notebook")), sudo=True)
+
+    if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-config'] \
+        and params.config['configurations']['zeppelin-config']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.HdfsNotebookRepo':
+      self.check_and_copy_notebook_in_hdfs(params)
+
+    if params.security_enabled:
+        zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
+        Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)
+
+    zeppelin_spark_dependencies = self.get_zeppelin_spark_dependencies()
+    if zeppelin_spark_dependencies and os.path.exists(zeppelin_spark_dependencies[0]):
+      self.create_zeppelin_dir(params)
+
+    # first setup: create interpreter.json only if it does not already exist
+    if not glob.glob(params.conf_dir + "/interpreter.json") and \
+      not os.path.exists(params.conf_dir + "/interpreter.json"):
+      self.create_interpreter_json()
+      self.update_zeppelin_interpreter()
+
+    if params.zeppelin_interpreter_config_upgrade == True:
+      self.reset_interpreter_settings()
+      self.update_zeppelin_interpreter()
+
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh restart >> '
+            + params.zeppelin_log_file, user=params.zeppelin_user)
+    pidfile = glob.glob(os.path.join(status_params.zeppelin_pid_dir,
+                                     'zeppelin-' + params.zeppelin_user + '*.pid'))[0]
+    Logger.info(format("Pid file is: {pidfile}"))
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    try:
+        pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-' +
+                             status_params.zeppelin_user + '*.pid')[0]
+    except IndexError:
+        pid_file = ''
+    check_process_status(pid_file)
+
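+  # Add any interpreter settings present in the bundled interpreter.json template
+  # but missing from the current configuration (used when
+  # zeppelin.interpreter.config.upgrade is enabled).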
+  def reset_interpreter_settings(self):
+    import json
+    import interpreter_json_template
+    interpreter_json_template = json.loads(interpreter_json_template.template)['interpreterSettings']
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+
+    for setting_key in interpreter_json_template.keys():
+      if setting_key not in interpreter_settings:
+        interpreter_settings[setting_key] = interpreter_json_template[
+          setting_key]
+
+    self.set_interpreter_settings(config_data)
+
+  def get_interpreter_settings(self):
+    import params
+    import json
+
+    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+    config_content = sudo.read_file(interpreter_config)
+    config_data = json.loads(config_content)
+    return config_data
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
+      stack_select.select_packages(params.version)
+
+  def set_interpreter_settings(self, config_data):
+    import params
+    import json
+
+    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+    File(interpreter_config,
+         group=params.zeppelin_group,
+         owner=params.zeppelin_user,
+         content=json.dumps(config_data, indent=2)
+         )
+
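+  # Fill in (or clear, when security is disabled) the Kerberos principal/keytab
+  # properties of the livy, spark, jdbc and sh interpreters.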
+  def update_kerberos_properties(self):
+    import params
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+    for interpreter_setting in interpreter_settings:
+      interpreter = interpreter_settings[interpreter_setting]
+      if interpreter['group'] == 'livy' and params.livy_livyserver_host:
+        if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+          interpreter['properties']['zeppelin.livy.principal'] = params.zeppelin_kerberos_principal
+          interpreter['properties']['zeppelin.livy.keytab'] = params.zeppelin_kerberos_keytab
+        else:
+          interpreter['properties']['zeppelin.livy.principal'] = ""
+          interpreter['properties']['zeppelin.livy.keytab'] = ""
+      elif interpreter['group'] == 'spark':
+        if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+          interpreter['properties']['spark.yarn.principal'] = params.zeppelin_kerberos_principal
+          interpreter['properties']['spark.yarn.keytab'] = params.zeppelin_kerberos_keytab
+        else:
+          interpreter['properties']['spark.yarn.principal'] = ""
+          interpreter['properties']['spark.yarn.keytab'] = ""
+      elif interpreter['group'] == 'jdbc':
+        if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+          interpreter['properties']['zeppelin.jdbc.auth.type'] = "KERBEROS"
+          interpreter['properties']['zeppelin.jdbc.principal'] = params.zeppelin_kerberos_principal
+          interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
+          if params.zookeeper_znode_parent \
+              and params.hbase_zookeeper_quorum \
+              and 'phoenix.url' in interpreter['properties'] \
+              and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
+            interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                       params.hbase_zookeeper_quorum + ':' + \
+                                                       params.zookeeper_znode_parent
+        else:
+          interpreter['properties']['zeppelin.jdbc.auth.type'] = "SIMPLE"
+          interpreter['properties']['zeppelin.jdbc.principal'] = ""
+          interpreter['properties']['zeppelin.jdbc.keytab.location'] = ""
+      elif interpreter['group'] == 'sh':
+        if params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab and params.security_enabled:
+          interpreter['properties']['zeppelin.shell.auth.type'] = "KERBEROS"
+          interpreter['properties']['zeppelin.shell.principal'] = params.zeppelin_kerberos_principal
+          interpreter['properties']['zeppelin.shell.keytab.location'] = params.zeppelin_kerberos_keytab
+        else:
+          interpreter['properties']['zeppelin.shell.auth.type'] = ""
+          interpreter['properties']['zeppelin.shell.principal'] = ""
+          interpreter['properties']['zeppelin.shell.keytab.location'] = ""
+
+    self.set_interpreter_settings(config_data)
+
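+  # Rewrite interpreter.json to match the cluster: register spark2/livy2 when
+  # available, drop interpreter groups not listed in
+  # zeppelin.interpreter.group.order, and derive the JDBC, Hive, Phoenix and Livy
+  # connection URLs from the detected services.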
+  def update_zeppelin_interpreter(self):
+    import params
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+
+    if 'spark2-defaults' in params.config['configurations']:
+      spark2_config = self.get_spark2_interpreter_config()
+      config_id = spark2_config["id"]
+      interpreter_settings[config_id] = spark2_config
+
+    if params.livy2_livyserver_host:
+      livy2_config = self.get_livy2_interpreter_config()
+      config_id = livy2_config["id"]
+      interpreter_settings[config_id] = livy2_config
+
+    if params.zeppelin_interpreter:
+      settings_to_delete = []
+      for settings_key, interpreter in interpreter_settings.items():
+        if interpreter['group'] not in params.zeppelin_interpreter:
+          settings_to_delete.append(settings_key)
+
+      for key in settings_to_delete:
+        del interpreter_settings[key]
+
+    hive_interactive_properties_key = 'hive_interactive'
+    for setting_key in interpreter_settings.keys():
+      interpreter = interpreter_settings[setting_key]
+      if interpreter['group'] == 'jdbc':
+        interpreter['dependencies'] = []
+
+        if not params.hive_server_host and params.hive_server_interactive_hosts:
+          hive_interactive_properties_key = 'hive'
+
+        if params.hive_server_host:
+          interpreter['properties']['hive.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties']['hive.user'] = 'hive'
+          interpreter['properties']['hive.password'] = ''
+          interpreter['properties']['hive.proxy.user.property'] = 'hive.server2.proxy.user'
+          if params.hive_server2_support_dynamic_service_discovery:
+            interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                                                 params.hive_zookeeper_quorum + \
+                                                 '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                                                    params.hive_zookeeper_namespace
+          else:
+            interpreter['properties']['hive.url'] = 'jdbc:hive2://' + \
+                                                 params.hive_server_host + \
+                                                     ':' + params.hive_server_port
+        if params.hive_server_interactive_hosts:
+          interpreter['properties'][hive_interactive_properties_key + '.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties'][hive_interactive_properties_key + '.user'] = 'hive'
+          interpreter['properties'][hive_interactive_properties_key + '.password'] = ''
+          interpreter['properties'][hive_interactive_properties_key + '.proxy.user.property'] = 'hive.server2.proxy.user'
+          if params.hive_server2_support_dynamic_service_discovery:
+            interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                                                    params.hive_zookeeper_quorum + \
+                                                    '/;' + 'serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=' + \
+                                                    params.hive_interactive_zookeeper_namespace
+          else:
+            interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
+                                                    params.hive_server_interactive_hosts + \
+                                                    ':' + params.hive_server_port
+
+        if params.spark_thrift_server_hosts:
+          interpreter['properties']['spark.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties']['spark.user'] = 'hive'
+          interpreter['properties']['spark.password'] = ''
+          interpreter['properties']['spark.proxy.user.property'] = 'hive.server2.proxy.user'
+          interpreter['properties']['spark.url'] = 'jdbc:hive2://' + \
+              params.spark_thrift_server_hosts + ':' + params.spark_hive_thrift_port + '/'
+          if params.spark_hive_principal:
+            interpreter['properties']['spark.url'] += ';principal=' + params.spark_hive_principal
+
+        if params.spark2_thrift_server_hosts:
+          interpreter['properties']['spark2.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties']['spark2.user'] = 'hive'
+          interpreter['properties']['spark2.password'] = ''
+          interpreter['properties']['spark2.proxy.user.property'] = 'hive.server2.proxy.user'
+          interpreter['properties']['spark2.url'] = 'jdbc:hive2://' + \
+              params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/'
+          if params.spark_hive_principal:
+            interpreter['properties']['spark2.url'] += ';principal=' + params.spark2_hive_principal
+
+        if params.zookeeper_znode_parent \
+                and params.hbase_zookeeper_quorum:
+            interpreter['properties']['phoenix.driver'] = 'org.apache.phoenix.jdbc.PhoenixDriver'
+            interpreter['properties']['phoenix.hbase.client.retries.number'] = '1'
+            interpreter['properties']['phoenix.user'] = 'phoenixuser'
+            interpreter['properties']['phoenix.password'] = ''
+            interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                    params.hbase_zookeeper_quorum + ':' + \
+                                                    params.zookeeper_znode_parent
+
+      elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy':
+        if params.livy_livyserver_host:
+          interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy_livyserver_host + \
+                                                           ":" + params.livy_livyserver_port
+        else:
+          del interpreter_settings[setting_key]
+
+      elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy2':
+        if params.livy2_livyserver_host:
+          interpreter['properties']['zeppelin.livy.url'] = "http://" + params.livy2_livyserver_host + \
+                                                           ":" + params.livy2_livyserver_port
+        else:
+          del interpreter_settings[setting_key]
+
+
+      elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark':
+        if 'spark-env' in params.config['configurations']:
+          interpreter['properties']['master'] = "yarn-client"
+          interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark-client/"
+        else:
+          del interpreter_settings[setting_key]
+
+      elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark2':
+        if 'spark2-env' in params.config['configurations']:
+          interpreter['properties']['master'] = "yarn-client"
+          interpreter['properties']['SPARK_HOME'] = "/usr/hdp/current/spark2-client/"
+        else:
+          del interpreter_settings[setting_key]
+
+    self.set_interpreter_settings(config_data)
+    self.update_kerberos_properties()
+
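+  # Write the bundled interpreter.json template as the initial interpreter
+  # configuration.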
+  def create_interpreter_json(self):
+    import interpreter_json_template
+    import params
+
+    interpreter_json = interpreter_json_template.template
+    File(format("{params.conf_dir}/interpreter.json"), content=interpreter_json,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+  def get_zeppelin_spark_dependencies(self):
+    import params
+    return glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies*.jar')
+
+  def get_spark2_interpreter_config(self):
+    import spark2_config_template
+    import json
+
+    return json.loads(spark2_config_template.template)
+
+  def get_livy2_interpreter_config(self):
+    import livy2_config_template
+    import json
+
+    return json.loads(livy2_config_template.template)
+
+if __name__ == "__main__":
+  Master().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
new file mode 100644
index 0000000..3242f26
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/params.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import functools
+import os
+import re
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
+def get_port_from_url(address):
+  if address is not None:
+    return address.split(':')[-1]
+  return address
+
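+# Read the "Spark <major>.<minor>" line from the RELEASE file under spark_home and
+# return the version string, or None if the file is missing or cannot be parsed.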
+def extract_spark_version(spark_home):
+  try:
+    with open(spark_home + "/RELEASE") as fline:
+      return re.search('Spark (\d\.\d).+', fline.readline().rstrip()).group(1)
+  except:
+    pass
+  return None
+
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+# e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+
+zeppelin_dirname = 'zeppelin-server'
+
+install_dir = os.path.join(stack_root, "current")
+executor_mem = config['configurations']['zeppelin-env']['zeppelin.executor.mem']
+executor_instances = config['configurations']['zeppelin-env'][
+  'zeppelin.executor.instances']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+spark_jar_dir = config['configurations']['zeppelin-env']['zeppelin.spark.jar.dir']
+spark_jar = format("{spark_jar_dir}/zeppelin-spark-0.5.5-SNAPSHOT.jar")
+setup_view = True
+temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+
+spark_home = ""
+spark_version = None
+spark2_home = ""
+spark2_version = None
+if 'spark-defaults' in config['configurations']:
+  spark_home = os.path.join(stack_root, "current", 'spark-client')
+  spark_version = extract_spark_version(spark_home)
+if 'spark2-defaults' in config['configurations']:
+  spark2_home = os.path.join(stack_root, "current", 'spark2-client')
+  spark2_version = extract_spark_version(spark2_home)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# params from zeppelin-config
+zeppelin_port = str(config['configurations']['zeppelin-config']['zeppelin.server.port'])
+zeppelin_interpreter = None
+if 'zeppelin.interpreter.group.order' in config['configurations']['zeppelin-config']:
+  zeppelin_interpreter = str(config['configurations']['zeppelin-config']
+                             ['zeppelin.interpreter.group.order']).split(",")
+
+# params from zeppelin-env
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+
+zeppelin_dir = os.path.join(*[install_dir, zeppelin_dirname])
+conf_dir = "/etc/zeppelin/conf"
+external_dependency_conf = "/etc/zeppelin/conf/external-dependency-conf"
+notebook_dir = os.path.join(*[install_dir, zeppelin_dirname, 'notebook'])
+
+# zeppelin-env.sh
+zeppelin_env_content = config['configurations']['zeppelin-env']['zeppelin_env_content']
+
+# shiro.ini
+shiro_ini_content = config['configurations']['zeppelin-shiro-ini']['shiro_ini_content']
+
+# log4j.properties
+log4j_properties_content = config['configurations']['zeppelin-log4j-properties']['log4j_properties_content']
+
+# detect configs
+master_configs = config['clusterHostInfo']
+java64_home = config['hostLevelParams']['java_home']
+ambari_host = str(master_configs['ambari_server_host'][0])
+zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+ui_ssl_enabled = config['configurations']['zeppelin-config']['zeppelin.ssl']
+
+# detect HS2 details, if installed
+
+hive_server_host = None
+hive_metastore_host = '0.0.0.0'
+hive_metastore_port = None
+hive_server_port = None
+hive_zookeeper_quorum = None
+hive_server2_support_dynamic_service_discovery = None
+is_hive_installed = False
+hive_zookeeper_namespace = None
+hive_interactive_zookeeper_namespace = None
+
+if 'hive_server_host' in master_configs and len(master_configs['hive_server_host']) != 0:
+  is_hive_installed = True
+  spark_hive_properties = {
+    'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
+  }
+  hive_server_host = str(master_configs['hive_server_host'][0])
+  hive_metastore_host = str(master_configs['hive_metastore_host'][0])
+  hive_metastore_port = str(
+    get_port_from_url(default('/configurations/hive-site/hive.metastore.uris', '')))
+  hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+  hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+  hive_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
+  hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+
+hive_server_interactive_hosts = None
+if 'hive_server_interactive_hosts' in master_configs and len(master_configs['hive_server_interactive_hosts']) != 0:
+    hive_server_interactive_hosts = str(master_configs['hive_server_interactive_hosts'][0])
+    hive_interactive_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
+    hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+    hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+    hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+
+spark_thrift_server_hosts = None
+spark_hive_thrift_port = None
+spark_hive_principal = None
+if 'spark_thriftserver_hosts' in master_configs and len(master_configs['spark_thriftserver_hosts']) != 0:
+  spark_thrift_server_hosts = str(master_configs['spark_thriftserver_hosts'][0])
+  if config['configurations']['spark-hive-site-override']:
+    spark_hive_thrift_port = config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']
+  if config['configurations']['spark-thrift-sparkconf'] and \
+      'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark-thrift-sparkconf']:
+    spark_hive_principal = config['configurations']['spark-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+
+spark2_thrift_server_hosts = None
+spark2_hive_thrift_port = None
+spark2_hive_principal = None
+if 'spark2_thriftserver_hosts' in master_configs and len(master_configs['spark2_thriftserver_hosts']) != 0:
+  spark2_thrift_server_hosts = str(master_configs['spark2_thriftserver_hosts'][0])
+  if config['configurations']['spark2-hive-site-override']:
+    spark2_hive_thrift_port = config['configurations']['spark2-hive-site-override']['hive.server2.thrift.port']
+  if config['configurations']['spark2-thrift-sparkconf'] and \
+      'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark2-thrift-sparkconf']:
+    spark2_hive_principal = config['configurations']['spark2-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+
+
+# detect hbase details if installed
+zookeeper_znode_parent = None
+hbase_zookeeper_quorum = None
+is_hbase_installed = False
+if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+  is_hbase_installed = True
+  zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+  hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+
+# detect spark queue
+if 'spark-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+  spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+elif 'spark2-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark2-defaults']:
+  spark_queue = config['configurations']['spark2-defaults']['spark.yarn.queue']
+else:
+  spark_queue = 'default'
+
+zeppelin_kerberos_keytab = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.keytab']
+zeppelin_kerberos_principal = config['configurations']['zeppelin-env']['zeppelin.server.kerberos.principal']
+if 'zeppelin.interpreter.config.upgrade' in config['configurations']['zeppelin-config']:
+  zeppelin_interpreter_config_upgrade = config['configurations']['zeppelin-config']['zeppelin.interpreter.config.upgrade']
+else:
+  zeppelin_interpreter_config_upgrade = False
+
+# e.g. 2.3
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+
+# e.g. 2.3.0.0
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+# e.g. 2.3.0.0-2130
+full_stack_version = default("/commandParams/version", None)
+
+spark_client_version = get_stack_version('spark-client')
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+livy_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+livy2_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
+
+livy_livyserver_host = None
+livy_livyserver_port = None
+livy2_livyserver_host = None
+livy2_livyserver_port = None
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and \
+    len(livy_hosts) > 0:
+  livy_livyserver_host = str(livy_hosts[0])
+  livy_livyserver_port = config['configurations']['livy-conf']['livy.server.port']
+
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and \
+    len(livy2_hosts) > 0:
+  livy2_livyserver_host = str(livy2_hosts[0])
+  livy2_livyserver_port = config['configurations']['livy2-conf']['livy.server.port']
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs
+)
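+# Illustrative usage (see master.py): with the partial above a caller only needs
+#   params.HdfsResource('/user/zeppelin', type='directory', action='create_on_execute')
+#   params.HdfsResource(None, action='execute')
+# and the keytab, principal and Hadoop conf/bin arguments are supplied automatically.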

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/service_check.py
new file mode 100644
index 0000000..bd7c855
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/service_check.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
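+# Service check: kinit as the Zeppelin user when security is enabled, then curl the
+# Zeppelin UI and require an HTTP 200 response (retried up to 10 times).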
+class ZeppelinServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        if params.security_enabled:
+          zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
+          Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)
+
+        scheme = "https" if params.ui_ssl_enabled else "http"
+        Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {scheme}://{zeppelin_host}:{zeppelin_port} | grep 200"),
+                tries = 10,
+                try_sleep=3,
+                logoutput=True)
+
+if __name__ == "__main__":
+    ZeppelinServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/spark2_config_template.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/spark2_config_template.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/spark2_config_template.py
new file mode 100644
index 0000000..28a63c6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/spark2_config_template.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+template = '''
+{
+  "id": "2C4U48MY3_spark2",
+  "name": "spark2",
+  "group": "spark",
+  "properties": {
+    "spark.executor.memory": "",
+    "args": "",
+    "zeppelin.spark.printREPLOutput": "true",
+    "spark.cores.max": "",
+    "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",
+    "zeppelin.spark.importImplicit": "true",
+    "zeppelin.spark.sql.stacktrace": "false",
+    "zeppelin.spark.concurrentSQL": "false",
+    "zeppelin.spark.useHiveContext": "true",
+    "zeppelin.pyspark.python": "python",
+    "zeppelin.dep.localrepo": "local-repo",
+    "zeppelin.R.knitr": "true",
+    "zeppelin.spark.maxResult": "1000",
+    "master": "local[*]",
+    "spark.app.name": "Zeppelin",
+    "zeppelin.R.image.width": "100%",
+    "zeppelin.R.render.options": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F",
+    "zeppelin.R.cmd": "R"
+  },
+  "status": "READY",
+  "interpreterGroup": [
+    {
+      "name": "spark",
+      "class": "org.apache.zeppelin.spark.SparkInterpreter",
+      "defaultInterpreter": true
+    },
+    {
+      "name": "sql",
+      "class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
+      "defaultInterpreter": false
+    },
+    {
+      "name": "dep",
+      "class": "org.apache.zeppelin.spark.DepInterpreter",
+      "defaultInterpreter": false
+    },
+    {
+      "name": "pyspark",
+      "class": "org.apache.zeppelin.spark.PySparkInterpreter",
+      "defaultInterpreter": false
+    },
+    {
+      "name": "r",
+      "class": "org.apache.zeppelin.spark.SparkRInterpreter",
+      "defaultInterpreter": false
+    }
+  ],
+  "dependencies": [],
+  "option": {
+    "remote": true,
+    "port": -1,
+    "perNoteSession": false,
+    "perNoteProcess": false,
+    "isExistingProcess": false,
+    "setPermission": false
+  }
+}
+'''
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/status_params.py
new file mode 100644
index 0000000..35360c6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/status_params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script import Script
+
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/templates/input.config-zeppelin.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/templates/input.config-zeppelin.json.j2 b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/templates/input.config-zeppelin.json.j2
new file mode 100644
index 0000000..2b373d5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/templates/input.config-zeppelin.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"zeppelin",
+      "rowtype":"service",
+      "path":"{{default('/configurations/zeppelin-env/zeppelin_log_dir', '/var/log/zeppelin')}}/zeppelin-zeppelin-*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "zeppelin"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}\\(\\{{"{"}}%{DATA:thread_name}\\{{"}"}}%{SPACE}%{JAVAFILE:file}\\[%{JAVAMETHOD:method}\\]:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..c1d8491
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/quicklinks/quicklinks.json
@@ -0,0 +1,35 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"zeppelin.ssl",
+          "desired":"true",
+          "site":"zeppelin-config"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "zeppelin_ui",
+        "label": "Zeppelin UI",
+        "requires_user_name": "false",
+        "component_name": "ZEPPELIN_MASTER",
+        "url":"%@://%@:%@/",
+        "port":{
+          "http_property": "zeppelin.server.port",
+          "http_default_port": "9995",
+          "https_property": "zeppelin.server.port",
+          "https_default_port": "9995",
+          "regex": "^(\\d+)$",
+          "site": "zeppelin-config"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/role_command_order.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/role_command_order.json
new file mode 100644
index 0000000..3b7d2d0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for ZEPPELIN",
+    "ZEPPELIN_MASTER-START" : ["NAMENODE-START"],
+    "ZEPPELIN_SERVICE_CHECK-SERVICE_CHECK" : ["ZEPPELIN_MASTER-START"]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/alerts.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/alerts.json
new file mode 100644
index 0000000..53dc4a2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/alerts.json
@@ -0,0 +1,18 @@
+{
+  "ZEPPELIN": {
+    "service": [],
+    "ZEPPELIN_MASTER": [
+      {
+        "name": "zeppelin_server_status",
+        "label": "Zeppelin Server Status",
+        "description": "This host-level alert is triggered if the Zeppelin server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-config.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-config.xml
new file mode 100644
index 0000000..ca6b295
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-config.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <!-- contents of actual zeppelin-site.xml -->
+  <property>
+    <name>zeppelin.server.addr</name>
+    <value>0.0.0.0</value>
+    <description>Server address</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.port</name>
+    <value>9995</value>
+    <description>Server port. The subsequent port (e.g. 9996) should also be open, as it will be
+            used by the web socket
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.ssl.port</name>
+    <value>9995</value>
+    <description>Server SSL port (used when the ssl property is set to true)
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.dir</name>
+    <value>notebook</value>
+    <description>Directory where notebooks are persisted</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.homescreen</name>
+    <value> </value>
+    <description>ID of the notebook to be displayed on the home screen, e.g. 2A94M5J1Z. An empty value
+            displays the default home screen
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.homescreen.hide</name>
+    <value>false</value>
+    <description>Hide the home screen notebook from the list when this value is set to true</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.s3.user</name>
+    <value>user</value>
+    <description>User name for the S3 folder structure. If S3 is used to store the notebooks, the
+            folder structure bucketname/username/notebook/ must be used
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.s3.bucket</name>
+    <value>zeppelin</value>
+    <description>Bucket name for notebook storage. If S3 is used to store the notebooks, the
+            folder structure bucketname/username/notebook/ must be used
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.storage</name>
+    <value>org.apache.zeppelin.notebook.repo.HdfsNotebookRepo</value>
+    <description>notebook persistence layer implementation. If S3 is used, set this to
+            org.apache.zeppelin.notebook.repo.S3NotebookRepo instead. If S3 is used to store the
+            notebooks, it is necessary to use the following folder structure
+            bucketname/username/notebook/
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.interpreter.dir</name>
+    <value>interpreter</value>
+    <description>Interpreter implementation base directory</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.interpreters</name>
+    <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter</value>
+    <description>Comma-separated interpreter configurations. The first interpreter becomes the
+            default
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.interpreter.group.order</name>
+    <value>spark,angular,jdbc,livy,md,sh</value>
+    <description>Comma-separated ordering of interpreter groups. The first group becomes the default
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.interpreter.connect.timeout</name>
+    <value>30000</value>
+    <description>Interpreter process connect timeout in msec.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl</name>
+    <value>false</value>
+    <description>Should SSL be used by the servers?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.client.auth</name>
+    <value>false</value>
+    <description>Should client authentication be used for SSL connections?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.keystore.path</name>
+    <value>conf/keystore</value>
+    <description>Path to keystore relative to Zeppelin home</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.keystore.type</name>
+    <value>JKS</value>
+    <description>The format of the given keystore (e.g. JKS or PKCS12)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.keystore.password</name>
+    <value>change me</value>
+    <description>Keystore password. Can be obfuscated by the Jetty Password tool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.key.manager.password</name>
+    <value>change me</value>
+    <description>Key Manager password. Defaults to keystore password. Can be obfuscated.
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.truststore.path</name>
+    <value>conf/truststore</value>
+    <description>Path to truststore relative to Zeppelin home. Defaults to the keystore path
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.truststore.type</name>
+    <value>JKS</value>
+    <description>The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same
+            type as the keystore type
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.ssl.truststore.password</name>
+    <value>change me</value>
+    <description>Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to
+            the keystore password
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.allowed.origins</name>
+    <value>*</value>
+    <description>Allowed sources for REST and WebSocket requests (e.g.
+            http://onehost:8080,http://otherhost.com). Leaving this set to * makes the server
+            vulnerable to https://issues.apache.org/jira/browse/ZEPPELIN-173
+        </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.anonymous.allowed</name>
+    <value>false</value>
+    <description>Whether anonymous access to Zeppelin is allowed</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>zeppelin.notebook.public</name>
+    <value>false</value>
+    <description>When true, newly created notebooks are public by default; otherwise they are private</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>zeppelin.websocket.max.text.message.size</name>
+    <value>1024000</value>
+    <description>Size in characters of the maximum text message to be received by websocket. Defaults to 1024000</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.interpreter.config.upgrade</name>
+    <value>true</value>
+    <description>If set to true, the default interpreter parameters are reset on every restart of the Zeppelin server</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
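
The zeppelin-config properties above are not copied to disk verbatim; the service's master.py
typically materializes them as zeppelin-site.xml through the XmlConfig resource already imported
there. A minimal sketch of that step, assuming params.conf_dir, params.zeppelin_user and
params.zeppelin_group are defined in params.py (the parameter names are illustrative, not taken
from this patch):

    from resource_management.libraries import XmlConfig
    import params

    # 'zeppelin-config' matches the config-type declared in metainfo.xml below.
    XmlConfig("zeppelin-site.xml",
              conf_dir=params.conf_dir,          # e.g. /etc/zeppelin/conf (assumed)
              configurations=params.config['configurations']['zeppelin-config'],
              owner=params.zeppelin_user,
              group=params.zeppelin_group)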

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-env.xml
new file mode 100644
index 0000000..85373e0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-env.xml
@@ -0,0 +1,184 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>zeppelin_pid_dir</name>
+    <value>/var/run/zeppelin</value>
+    <description>Directory containing the process ID file</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_user</name>
+    <value>zeppelin</value>
+    <property-type>USER</property-type>
+    <description>User the Zeppelin daemon runs as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_group</name>
+    <value>zeppelin</value>
+    <property-type>GROUP</property-type>
+    <description>Group the Zeppelin daemon runs as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_log_dir</name>
+    <value>/var/log/zeppelin</value>
+    <description>Zeppelin log directory</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_env_content</name>
+    <description>This is the jinja template for zeppelin-env.sh file</description>
+    <value>
+# export JAVA_HOME=
+export JAVA_HOME={{java64_home}}
+# export MASTER=                              # Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode.
+export MASTER=yarn-client
+export SPARK_YARN_JAR={{spark_jar}}
+# export ZEPPELIN_JAVA_OPTS                   # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"
+# export ZEPPELIN_MEM                         # Zeppelin jvm mem options Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
+# export ZEPPELIN_INTP_MEM                    # zeppelin interpreter process jvm mem options. Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
+# export ZEPPELIN_INTP_JAVA_OPTS              # zeppelin interpreter process jvm options.
+# export ZEPPELIN_SSL_PORT                    # ssl port (used when ssl environment variable is set to true)
+
+# export ZEPPELIN_LOG_DIR                     # Where log files are stored.  PWD by default.
+export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}
+# export ZEPPELIN_PID_DIR                     # The pid files are stored. ${ZEPPELIN_HOME}/run by default.
+export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}
+# export ZEPPELIN_WAR_TEMPDIR                 # The location of jetty temporary directory.
+# export ZEPPELIN_NOTEBOOK_DIR                # Where notebook saved
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN         # Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE    # hide homescreen notebook from list when this value set to "true". default "false"
+# export ZEPPELIN_NOTEBOOK_S3_BUCKET          # Bucket where notebook saved
+# export ZEPPELIN_NOTEBOOK_S3_ENDPOINT        # Endpoint of the bucket
+# export ZEPPELIN_NOTEBOOK_S3_USER            # User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json
+# export ZEPPELIN_IDENT_STRING                # A string representing this instance of zeppelin. $USER by default.
+# export ZEPPELIN_NICENESS                    # The scheduling priority for daemons. Defaults to 0.
+# export ZEPPELIN_INTERPRETER_LOCALREPO       # Local repository for interpreter's additional dependency loading
+# export ZEPPELIN_NOTEBOOK_STORAGE            # Refers to pluggable notebook storage class, can have two classes simultaneously with a sync between them (e.g. local and remote).
+# export ZEPPELIN_NOTEBOOK_ONE_WAY_SYNC       # If there are multiple notebook storages, should we treat the first one as the only source of truth?
+# export ZEPPELIN_NOTEBOOK_PUBLIC             # Make notebook public by default when created, private otherwise
+export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
+#### Spark interpreter configuration ####
+
+## Kerberos ticket refresh setting
+##
+export KINIT_FAIL_THRESHOLD=5
+export KERBEROS_REFRESH_INTERVAL=1d
+
+## Use provided spark installation ##
+## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit
+##
+# export SPARK_HOME                           # (required) When it is defined, load it instead of Zeppelin embedded Spark libraries
+# export SPARK_HOME={{spark_home}}
+# export SPARK_SUBMIT_OPTIONS                 # (optional) extra options to pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
+# export SPARK_APP_NAME                       # (optional) The name of spark application.
+
+## Use embedded spark binaries ##
+## Without SPARK_HOME defined, Zeppelin is still able to run the spark interpreter process using embedded spark binaries.
+## However, this is not encouraged when you can define SPARK_HOME.
+##
+# Options read in YARN client mode
+# export HADOOP_CONF_DIR                      # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
+export HADOOP_CONF_DIR=/etc/hadoop/conf
+# Pyspark (supported with Spark 1.2.1 and above)
+# To configure pyspark, set the spark distribution's path in the 'spark.home' property on the Interpreter settings screen in the Zeppelin GUI
+# export PYSPARK_PYTHON                       # path to the python command. must be the same path on the driver(Zeppelin) and all workers.
+# export PYTHONPATH
+
+## Spark interpreter options ##
+##
+# export ZEPPELIN_SPARK_USEHIVECONTEXT        # Use HiveContext instead of SQLContext if set true. true by default.
+# export ZEPPELIN_SPARK_CONCURRENTSQL         # Execute multiple SQL concurrently if set true. false by default.
+# export ZEPPELIN_SPARK_IMPORTIMPLICIT        # Import implicits, UDF collection, and sql if set true. true by default.
+# export ZEPPELIN_SPARK_MAXRESULT             # Max number of Spark SQL result to display. 1000 by default.
+# export ZEPPELIN_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE       # Size in characters of the maximum text message to be received by websocket. Defaults to 1024000
+
+
+#### HBase interpreter configuration ####
+
+## To connect to HBase running on a cluster, either HBASE_HOME or HBASE_CONF_DIR must be set
+
+# export HBASE_HOME=                          # (required) Root directory under which the HBase scripts and configuration live
+# export HBASE_CONF_DIR=                      # (optional) Alternatively, configuration directory can be set to point to the directory that has hbase-site.xml
+
+# export ZEPPELIN_IMPERSONATE_CMD             # Optional, when the user wants to run the interpreter as the end web user, e.g. 'sudo -H -u ${ZEPPELIN_IMPERSONATE_USER} bash -c '
+
+    </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.executor.mem</name>
+    <value>512m</value>
+    <description>Executor memory to use (e.g. 512m or 1g)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.executor.instances</name>
+    <value>2</value>
+    <description>Number of executor instances to use (e.g. 2)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.spark.jar.dir</name>
+    <value>/apps/zeppelin</value>
+    <description>Shared location to which the Zeppelin spark jar will be copied. Should be accessible
+      by all cluster nodes
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>zeppelin.server.kerberos.principal</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>
+      Kerberos principal name for the Zeppelin server.
+    </description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.kerberos.keytab</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>
+      Location of the Kerberos keytab file for the Zeppelin server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
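
zeppelin_env_content above is a Jinja template, so placeholders such as {{java64_home}} and
{{zeppelin_log_dir}} are resolved at deploy time rather than shipped literally. A hedged sketch of
how such content is usually rendered into zeppelin-env.sh with the InlineTemplate and File resources
(params.conf_dir and the owner/group names are assumptions, not taken from this patch); the same
pattern applies to the log4j and shiro contents defined below:

    import os
    from resource_management.core.resources.system import File
    from resource_management.core.source import InlineTemplate
    import params

    # InlineTemplate substitutes the {{...}} placeholders from the params module.
    File(os.path.join(params.conf_dir, "zeppelin-env.sh"),
         content=InlineTemplate(params.zeppelin_env_content),
         owner=params.zeppelin_user,
         group=params.zeppelin_group)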

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-log4j-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-log4j-properties.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-log4j-properties.xml
new file mode 100644
index 0000000..bca0091
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-log4j-properties.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+<property>
+    <name>log4j_properties_content</name>
+    <description>This is the content for log4j.properties file</description>
+    <value>
+log4j.rootLogger = INFO, dailyfile
+log4j.appender.stdout = org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n
+log4j.appender.dailyfile.DatePattern=.yyyy-MM-dd
+log4j.appender.dailyfile.Threshold = INFO
+log4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender
+log4j.appender.dailyfile.File = ${zeppelin.log.file}
+log4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout
+log4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n
+    </value>
+    <on-ambari-upgrade add="false"/>
+</property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-shiro-ini.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-shiro-ini.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-shiro-ini.xml
new file mode 100644
index 0000000..1ff3d9e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/configuration/zeppelin-shiro-ini.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>shiro_ini_content</name>
+    <description>This is the jinja template for shiro.ini file</description>
+      <value>
+[users]
+# List of users allowed to access Zeppelin, in the form: username = password, role1, role2
+# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections
+admin = admin, admin
+user1 = user1, role1, role2
+user2 = user2, role3
+user3 = user3, role2
+
+# Sample LDAP configuration, for user Authentication, currently tested for single Realm
+[main]
+### A sample for configuring Active Directory Realm
+#activeDirectoryRealm = org.apache.zeppelin.realm.ActiveDirectoryGroupRealm
+#activeDirectoryRealm.systemUsername = userNameA
+
+#use either systemPassword or hadoopSecurityCredentialPath, more details in http://zeppelin.apache.org/docs/latest/security/shiroauthentication.html
+#activeDirectoryRealm.systemPassword = passwordA
+#activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://file/user/zeppelin/zeppelin.jceks
+#activeDirectoryRealm.searchBase = CN=Users,DC=SOME_GROUP,DC=COMPANY,DC=COM
+#activeDirectoryRealm.url = ldap://ldap.test.com:389
+#activeDirectoryRealm.groupRolesMap = "CN=admin,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"admin","CN=finance,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"finance","CN=hr,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"hr"
+#activeDirectoryRealm.authorizationCachingEnabled = false
+
+### A sample for configuring LDAP Directory Realm
+#ldapRealm = org.apache.zeppelin.realm.LdapGroupRealm
+## search base for ldap groups (only relevant for LdapGroupRealm):
+#ldapRealm.contextFactory.environment[ldap.searchBase] = dc=COMPANY,dc=COM
+#ldapRealm.contextFactory.url = ldap://ldap.test.com:389
+#ldapRealm.userDnTemplate = uid={0},ou=Users,dc=COMPANY,dc=COM
+#ldapRealm.contextFactory.authenticationMechanism = SIMPLE
+
+### A sample PAM configuration
+#pamRealm=org.apache.zeppelin.realm.PamRealm
+#pamRealm.service=sshd
+
+
+sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
+### If caching of user is required then uncomment below lines
+cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
+securityManager.cacheManager = $cacheManager
+
+cookie = org.apache.shiro.web.servlet.SimpleCookie
+cookie.name = JSESSIONID
+#Uncomment the line below when running Zeppelin-Server in HTTPS mode
+#cookie.secure = true
+cookie.httpOnly = true
+sessionManager.sessionIdCookie = $cookie
+
+securityManager.sessionManager = $sessionManager
+# 86,400,000 milliseconds = 24 hours
+securityManager.sessionManager.globalSessionTimeout = 86400000
+shiro.loginUrl = /api/login
+
+[roles]
+role1 = *
+role2 = *
+role3 = *
+admin = *
+
+[urls]
+# This section is used for url-based security.
+# You can secure interpreter, configuration and credential information by URL. Comment or uncomment the URLs below that you want to hide.
+# anon means the access is anonymous.
+# authc means Form based Auth Security
+# To enforce security, comment the line below and uncomment the next one
+/api/version = anon
+#/api/interpreter/** = authc, roles[admin]
+#/api/configurations/** = authc, roles[admin]
+#/api/credential/** = authc, roles[admin]
+#/** = anon
+/** = authc
+      </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
\ No newline at end of file
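
Because the [urls] section ends with "/** = authc", every UI and REST request must authenticate
against the [users] entries. One quick way to confirm that form-based login works once the server
is up is to post credentials to the shiro.loginUrl endpoint; this sketch assumes the default
admin/admin account above, the default port 9995, and that the Python requests library is
available on the host:

    import requests

    # Hypothetical smoke test; field names follow Zeppelin's login REST API.
    resp = requests.post("http://localhost:9995/api/login",
                         data={"userName": "admin", "password": "admin"})
    # A 200 response with a JSESSIONID cookie indicates a successful login.
    print(resp.status_code)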

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/kerberos.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/kerberos.json
new file mode 100644
index 0000000..b605c9d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/kerberos.json
@@ -0,0 +1,51 @@
+{
+  "services": [
+    {
+      "name": "ZEPPELIN",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "zeppelin_user",
+          "principal": {
+            "value": "${zeppelin-env/zeppelin_user}${principal_suffix}@${realm}",
+            "type" : "user",
+            "configuration": "zeppelin-env/zeppelin.server.kerberos.principal",
+            "local_username" : "${zeppelin-env/zeppelin_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/zeppelin.server.kerberos.keytab",
+            "owner": {
+              "name": "${zeppelin-env/zeppelin_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "zeppelin-env/zeppelin.server.kerberos.keytab"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "ZEPPELIN_MASTER"
+        }
+      ],
+      "configurations": [
+        {
+          "zeppelin-env": {
+            "zeppelin.kerberos.enabled": "true"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${zeppelin-env/zeppelin_user}.groups": "*",
+            "hadoop.proxyuser.${zeppelin-env/zeppelin_user}.hosts": "*"
+          }
+        }
+      ]
+    }
+  ]
+}
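
When the cluster is Kerberized, the principal and keytab resolved from this descriptor are written
into zeppelin-env as zeppelin.server.kerberos.principal and zeppelin.server.kerberos.keytab, and the
service scripts normally obtain a ticket before touching HDFS. A minimal kinit sketch using helpers
that master.py already imports (the params names are illustrative assumptions):

    from resource_management.core.resources.system import Execute
    from resource_management.libraries.functions import get_kinit_path
    from resource_management.libraries.functions.default import default
    import params

    kinit_path = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
    # Acquire a ticket as the zeppelin service user before any HDFS operations.
    Execute(kinit_path + " -kt " + params.zeppelin_kerberos_keytab + " " + params.zeppelin_kerberos_principal,
            user=params.zeppelin_user)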

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/metainfo.xml
new file mode 100644
index 0000000..891d1f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/metainfo.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZEPPELIN</name>
+      <displayName>Zeppelin Notebook</displayName>
+      <comment>A web-based notebook that enables interactive data analytics. It lets you
+        create beautiful data-driven, interactive and collaborative documents with SQL, Scala
+        and more.
+      </comment>
+      <version>0.7.0</version>
+      <components>
+        <component>
+          <name>ZEPPELIN_MASTER</name>
+          <displayName>Zeppelin Notebook</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>10000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>SPARK/SPARK_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <logs>
+            <log>
+              <logId>zeppelin</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zeppelin</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>zeppelin-config</config-type>
+        <config-type>zeppelin-env</config-type>
+        <config-type>zeppelin-shiro-ini</config-type>
+        <config-type>zeppelin-log4j-properties</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/190094ba/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py
new file mode 100644
index 0000000..e6d7a91
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/alert_check_zeppelin.py
@@ -0,0 +1,47 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import sys
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.script import Script
+
+# Python 2 only: make UTF-8 the default encoding so non-ASCII output from the
+# status check does not raise UnicodeDecodeError inside the alert framework.
+reload(sys)
+sys.setdefaultencoding('utf8')
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """Ambari alert entry point: checks the zeppelin-*.pid file and returns (result_code, [description])."""
+  try:
+    pid_file = glob.glob(zeppelin_pid_dir + '/zeppelin-*.pid')[0]
+    check_process_status(pid_file)
+  except ComponentIsNotRunning as ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+  except:
+    return (RESULT_CODE_CRITICAL, ["Zeppelin is not running"])
+
+  return (RESULT_CODE_OK, ["Successful connection to Zeppelin"])
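
The alert returns an Ambari-style (result_code, [description]) tuple, so it can also be exercised
by hand on a Zeppelin host, provided it runs in an agent context where Script.get_config() can
resolve the command configuration (the module name and call below are a hypothetical illustration):

    import alert_check_zeppelin

    code, messages = alert_check_zeppelin.execute()
    print("%s: %s" % (code, messages[0]))   # e.g. "OK: Successful connection to Zeppelin"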

