ambari-commits mailing list archives

From jmar...@apache.org
Subject [17/58] [partial] ambari git commit: [RTC 136620]: Introduce BigInsights stacks on Ambari 2.4 branch
Date Wed, 17 Aug 2016 00:33:11 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..8334ae5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import iop_select
+from hive import hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+class HiveClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+    self.configure(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='client')
+  
+  
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveClientDefault(HiveClient):        
+  def get_stack_to_component(self):
+    return {"BigInsights": "hadoop-client"}
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Execute iop-select before reconfiguring this client to the new stack version.
+
+    :param env:
+    :param upgrade_type:
+    :return:
+    """
+    Logger.info("Executing Hive client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # is not at least BigInsights 4.1.0.0
+    if not params.version or compare_versions(format_hdp_stack_version(params.version), '4.1.0.0') < 0:
+      return
+
+    conf_select.select(params.stack_name, "hive", params.version)
+    conf_select.select(params.stack_name, "hadoop", params.version)
+    iop_select.select("hadoop-client", params.version)
+
+    # HCat client doesn't have a first-class entry in iop-select. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    iop_select.select("hive-webhcat", params.version)
+
+if __name__ == "__main__":
+  HiveClient().execute()

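A minimal standalone sketch of the version gate used by pre_upgrade_restart above. Plain Python; format_version and compare_versions here are simplified stand-ins for resource_management's format_hdp_stack_version/compare_versions helpers:

    # Simplified stand-ins for resource_management's version helpers.
    def format_version(v):
        # Normalize "4.1" -> "4.1.0.0" so segment-wise comparison works.
        parts = (v.split('.') + ['0', '0', '0'])[:4]
        return '.'.join(parts)

    def compare_versions(a, b):
        # Compare dotted versions numerically, segment by segment.
        pa = [int(x) for x in a.split('.')]
        pb = [int(x) for x in b.split('.')]
        return (pa > pb) - (pa < pb)

    version = "4.2.0.0"
    if version and compare_versions(format_version(version), '4.1.0.0') >= 0:
        # the real script would call conf_select.select()/iop_select.select() here
        print("would re-point hive/hadoop symlinks to %s" % version)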
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..7df8e62
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, Directory
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import iop_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.core.resources.system import File
+
+from hive import hive
+from hive import jdbc_connector
+from hive_service import hive_service
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+# the legacy conf.server location in IOP
+LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
+
+class HiveMetastore(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages = params.hive_exclude_packages)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # writing configurations on start required for security
+    self.configure(env)
+
+    hive_service('metastore', action='start', upgrade_type=upgrade_type)
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hive_service('metastore', action='stop', upgrade_type=upgrade_type)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name = 'metastore')
+    
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveMetastoreDefault(HiveMetastore):
+  def get_stack_to_component(self):
+    return {"BigInsights": "hive-metastore"}
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_process_status
+
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # check the Hive Metastore process status using its pid file
+    check_process_status(pid_file)
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Metastore Stack Upgrade pre-restart")
+    import params
+
+    env.set_params(params)
+
+    is_stack_42_or_later = Script.is_hdp_stack_greater_or_equal("4.2.0.0")
+    is_upgrade = params.upgrade_direction == Direction.UPGRADE
+
+    if is_stack_42_or_later and is_upgrade:
+      self.upgrade_schema(env)
+      
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hive", params.version)
+      iop_select.select("hive-metastore", params.version)
+
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.metastore.kerberos.keytab.file",
+                           "hive.metastore.kerberos.principal"]
+
+      props_read_check = ["hive.metastore.kerberos.keytab.file"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
+            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
+                                security_params['hive-site']['hive.metastore.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def upgrade_schema(self, env):
+    """
+    Executes the schema upgrade binary.  This is its own function because it could
+be called as a standalone task from the upgrade pack, but is safe to run for each
+    metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.
+
+    The metastore schema upgrade requires a database driver library for most
+    databases. During an upgrade, it's possible that the library is not present,
+    so this will also attempt to copy/download the appropriate driver.
+
+    This function will also ensure that configurations are written out to disk before running
+    since the new configs will most likely not yet exist on an upgrade.
+
+    Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
+    """
+    Logger.info("Upgrading Hive Metastore Schema")
+    import params
+    env.set_params(params)
+
+    # ensure that configurations are written out before trying to upgrade the schema
+    # since the schematool needs configs and doesn't know how to use the hive conf override
+    self.configure(env)
+
+    if params.security_enabled:
+      kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+      Execute(kinit_command,user=params.smokeuser)
+
+    # ensure that the JDBC driver is present for the schema tool; if it's not
+    # present, then download it first
+    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
+      target_directory = format("/usr/iop/{version}/hive/lib")
+
+      # download it if it does not exist
+      if not os.path.exists(params.source_jdbc_file):
+        jdbc_connector()
+        
+      target_directory_and_filename = os.path.join(target_directory, os.path.basename(params.source_jdbc_file))
+      
+      Execute(('cp', params.target, target_directory),
+        path=["/bin", "/usr/bin/"], sudo = True)
+        
+      File(os.path.join(target_directory, os.path.basename(params.target)),
+        mode = 0644,
+      )
+
+      if params.sqla_db_used:
+        target_native_libs_directory = format("{target_directory}/native/lib64")
+
+        Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))
+
+        Directory(target_native_libs_directory, recursive=True)
+
+        Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))
+
+        Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
+      else:
+        # copy the JDBC driver from the older metastore location to the new location only
+        # if it does not already exist
+        if not os.path.exists(target_directory_and_filename):
+          Execute(('cp', params.source_jdbc_file, target_directory),
+            path=["/bin", "/usr/bin/"], sudo = True)
+
+      File(target_directory_and_filename, mode = 0644)
+
+    # build the schema tool command
+    binary = format("/usr/iop/{version}/hive/bin/schematool")
+
+    # the conf.server directory changed locations
+    # since the configurations have not been written out yet during an upgrade
+    # we need to choose the original legacy location
+    schematool_hive_server_conf_dir = params.hive_server_conf_dir
+    if params.current_version is not None:
+      current_version = format_hdp_stack_version(params.current_version)
+      if compare_versions(current_version, "4.1.0.0") < 0:
+        schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
+
+    env_dict = {
+      'HIVE_CONF_DIR': schematool_hive_server_conf_dir
+    }
+
+    command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
+    Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

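For reference, a sketch of the schematool invocation that upgrade_schema() performs, using subprocess in place of the Execute resource; the version, db type, and conf dir below are illustrative assumptions:

    import os
    import subprocess

    version = "4.2.0.0"                  # stack version being upgraded to
    hive_metastore_db_type = "mysql"     # from hive-env in the real script
    conf_dir = "/etc/hive/conf.server"   # legacy conf.server location pre-4.1.0.0

    binary = "/usr/iop/%s/hive/bin/schematool" % version
    env = dict(os.environ, HIVE_CONF_DIR=conf_dir)

    # The schema upgrade is a no-op on an already-upgraded metastore,
    # so running this once per metastore instance is safe.
    subprocess.check_call(
        [binary, "-dbType", hive_metastore_db_type, "-upgradeSchema"],
        env=env)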
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..e2ba489
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import iop_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from ambari_commons import OSCheck, OSConst
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+from setup_ranger_hive import setup_ranger_hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+
+import hive_server_upgrade
+from hive import hive
+from hive_service import hive_service
+
+
+class HiveServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='hiveserver2')
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerDefault(HiveServer):
+  def get_stack_to_component(self):
+    return {"BigInsights": "hive-server2"}
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    
+    setup_ranger_hive(upgrade_type=upgrade_type)
+    hive_service('hiveserver2', action = 'start', upgrade_type=upgrade_type)
+
+    # only perform this if upgrading and rolling; a non-rolling upgrade doesn't need
+    # to do this since hive is already down
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      hive_server_upgrade.post_upgrade_deregister()
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # During rolling upgrade, HiveServer2 should not be stopped before new server is available.
+    # Once new server is started, old one is stopped by the --deregister command which is 
+    # invoked by the 'hive_server_upgrade.post_upgrade_deregister()' method
+    if upgrade_type != UPGRADE_TYPE_ROLLING:
+      hive_service( 'hiveserver2', action = 'stop' )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+
+    # check the HiveServer2 process status using its pid file
+    check_process_status(pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Hive Server Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hive", params.version)
+      iop_select.select("hive-server2", params.version)
+      
+      # Copy mapreduce.tar.gz to HDFS
+      resource_created = copy_to_hdfs(
+        "mapreduce",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped)
+
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
+                           "hive.server2.authentication.kerberos.principal",
+                           "hive.server2.authentication.spnego.principal",
+                           "hive.server2.authentication.spnego.keytab"]
+
+      props_read_check = ["hive.server2.authentication.kerberos.keytab",
+                          "hive.server2.authentication.spnego.keytab"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
+            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  HiveServer().execute()

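A standalone sketch of the validation flow inside security_status(), with plain dicts standing in for build_expectations/validate_security_config_properties and a hand-written hive-site snippet (normally parsed from hive-site.xml on disk):

    props_value_check = {"hive.server2.authentication": "KERBEROS",
                         "hive.metastore.sasl.enabled": "true"}
    props_empty_check = ["hive.server2.authentication.kerberos.keytab",
                         "hive.server2.authentication.kerberos.principal"]

    # Illustrative hive-site contents; the real script reads these from disk.
    hive_site = {"hive.server2.authentication": "KERBEROS",
                 "hive.metastore.sasl.enabled": "true",
                 "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
                 "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM"}

    issues = []
    for prop, expected in props_value_check.items():
        if hive_site.get(prop) != expected:
            issues.append("%s should be %s" % (prop, expected))
    for prop in props_empty_check:
        if not hive_site.get(prop):
            issues.append("%s must not be empty" % prop)

    state = "SECURED_KERBEROS" if not issues else "UNSECURED"
    print("%s %s" % (state, issues))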
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server_upgrade.py
new file mode 100644
index 0000000..050c71e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_server_upgrade.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import iop_select
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions
+
+
+def post_upgrade_deregister():
+  """
+  Runs the "hive --service hiveserver2 --deregister <version>" command to
+  de-provision the server in preparation for an upgrade. This will contact
+  ZooKeeper to remove the server so that clients that attempt to connect
+  will be directed to other servers automatically. Once all
+clients have drained, the server will shut down automatically; this process
+  could take a very long time.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
+
+  if params.security_enabled:
+    kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    Execute(kinit_command,user=params.smokeuser)
+
+  # calculate the current hive server version
+  current_hiveserver_version = _get_current_hiveserver_version()
+  if current_hiveserver_version is None:
+    raise Fail('Unable to determine the current HiveServer2 version to deregister.')
+
+  # fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
+  hive_server_conf_dir = params.hive_server_conf_dir
+  if not os.path.exists(hive_server_conf_dir):
+    hive_server_conf_dir = "/etc/hive/conf.server"
+
+  # deregister
+  hive_execute_path = params.execute_path
+  # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
+  # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
+  # By now iop-select has been called to set 'current' to the target stack
+  if "downgrade" == params.upgrade_direction:
+    # hive_bin
+    downgrade_version = params.current_version
+    if params.downgrade_from_version:
+      downgrade_version = params.downgrade_from_version
+    hive_execute_path = _get_hive_execute_path(downgrade_version)
+
+  command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
+  Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
+
+def _get_hive_execute_path(stack_version):
+  """
+  Returns the exact execute path to use for the given stack-version.
+  This method does not return the "current" path
+  :param stack_version: Exact stack-version to use in the new path
+  :return: Hive execute path for the exact IOP stack-version
+  """
+  import params
+
+  hive_execute_path = params.execute_path
+  formatted_stack_version = format_hdp_stack_version(stack_version)
+  if formatted_stack_version and compare_versions(formatted_stack_version, "4.1") >= 0:
+    # hive_bin
+    new_hive_bin = format('/usr/iop/{stack_version}/hive/bin')
+    if (os.pathsep + params.hive_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
+    # hadoop_bin_dir
+    new_hadoop_bin = iop_select.get_hadoop_dir_for_stack_version("bin", stack_version)
+    old_hadoop_bin = params.hadoop_bin_dir
+    if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
+  return hive_execute_path
+
+def _get_current_hiveserver_version():
+  """
+  Runs "hive --version" and parses the result in order
+  to obtain the current version of hive.
+
+  :return:  the hiveserver2 version, returned by "hive --version"
+  """
+  import params
+
+  try:
+    # When downgrading the source version should be the version we are downgrading from
+    if "downgrade" == params.upgrade_direction:
+      if not params.downgrade_from_version:
+        raise Fail('The version we are downgrading from should be provided in \'downgrade_from_version\'')
+      source_version = params.downgrade_from_version
+    else:
+      source_version = params.current_version
+    hive_execute_path = _get_hive_execute_path(source_version)
+    version_hive_bin = params.hive_bin
+    formatted_source_version = format_hdp_stack_version(source_version)
+    if formatted_source_version and compare_versions(formatted_source_version, "4.1") >= 0:
+      version_hive_bin = format('/usr/iop/{source_version}/hive/bin')
+    command = format('{version_hive_bin}/hive --version')
+    return_code, iop_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
+  except Exception as e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
+
+  if return_code != 0:
+    raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  match = re.search(r'^(Hive) ([0-9]+\.[0-9]+\.\S+)', iop_output, re.MULTILINE)
+
+  if match:
+    current_hive_server_version = match.group(2)
+    return current_hive_server_version
+  else:
+    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(iop_output))
+
+

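A quick illustration of the parsing done by _get_current_hiveserver_version(); the sample output is an assumption (real `hive --version` output carries extra lines such as subversion and compiler info):

    import re

    iop_output = """Hive 1.2.1000.4.2.0.0-1
    Subversion git://example/hive -r deadbeef"""

    match = re.search(r'^(Hive) ([0-9]+\.[0-9]+\.\S+)', iop_output, re.MULTILINE)
    if match:
        print(match.group(2))   # -> 1.2.1000.4.2.0.0-1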
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..b564057
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import time
+
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.core.resources.service import Service
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.hive_check import check_thrift_port_sasl
+from resource_management.libraries.functions import get_user_call_output
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_service(name, action='start', upgrade_type=None):
+  import params
+  if name == 'metastore':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_metastore_win_service_name, action=action)
+
+  if name == 'hiveserver2':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_server_win_service_name, action=action)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_service(name, action='start', upgrade_type=None):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format("{start_metastore_path} {hive_log_dir}/hive-metastore.out {hive_log_dir}/hive-metastore-err.out {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format("{start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2-err.out {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+
+  pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=params.hive_user, is_checked_call=False)[1]
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
+
+  if action == 'start':
+    if name == 'hiveserver2':
+      check_fs_root()
+
+    daemon_cmd = cmd
+    hadoop_home = params.hadoop_home
+    hive_bin = "hive"
+
+    # upgrading hiveserver2 (rolling_restart) means that there is an existing,
+    # de-registering hiveserver2; the pid will still exist, but the new
+    # hiveserver is spinning up on a new port, so the pid will be re-written
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      process_id_exists_command = None
+
+      if params.version:
+        hadoop_home = format("/usr/iop/{version}/hadoop")
+        hive_bin = os.path.join(params.hive_bin, hive_bin)
+
+    Execute(daemon_cmd, 
+      user = params.hive_user,
+      environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_bin },
+      path = params.execute_path,
+      not_if = process_id_exists_command)
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
+       params.hive_jdbc_driver == "org.postgresql.Driver" or \
+       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
+      
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
+  elif action == 'stop':
+
+    daemon_kill_cmd = format("{sudo} kill {pid}")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid}")
+
+    Execute(daemon_kill_cmd,
+      not_if = format("! ({process_id_exists_command})")
+    )
+
+    wait_time = 5
+    Execute(daemon_hard_kill_cmd,
+      not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )")
+    )
+
+    # check if stopped the process, else fail the task
+    Execute(format("! ({process_id_exists_command})"),
+      tries=20,
+      try_sleep=3,
+    )
+
+    File(pid_file,
+         action = "delete"
+    )
+
+def check_fs_root():
+  import params
+
+  if not params.fs_root.startswith("hdfs://"):
+    Logger.info("Skipping fs root check as fs_root does not start with hdfs://")
+    return
+
+  metatool_cmd = format("hive --config {hive_server_conf_dir} --service metatool")
+  cmd = as_user(format("{metatool_cmd} -listFSRoot", env={'PATH': params.execute_path}), params.hive_user) \
+        + format(" 2>/dev/null | grep hdfs:// | cut -f1,2,3 -d '/' | grep -v '{fs_root}' | head -1")
+  code, out = shell.call(cmd)
+
+  if code == 0 and out.strip() != "" and params.fs_root.strip() != out.strip():
+    out = out.strip()
+    cmd = format("{metatool_cmd} -updateLocation {fs_root} {out}")
+    Execute(cmd,
+            user=params.hive_user,
+            environment={'PATH': params.execute_path}
+    )
+

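A standalone sketch of the pid-file guard that hive_service() hands to Execute via not_if/only_if; the pid-file path below is illustrative:

    import subprocess

    def process_is_running(pid_file):
        # Mirrors the guard above: the pid file must exist AND the
        # recorded pid must map to a live process.
        cmd = ("ls %s >/dev/null 2>&1 && "
               "ps -p `cat %s` >/dev/null 2>&1" % (pid_file, pid_file))
        return subprocess.call(cmd, shell=True) == 0

    if not process_is_running("/var/run/hive/hive-server.pid"):
        pass  # safe to launch the daemon here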
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..4ab79a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import mysql_users
+from resource_management import *
+
+from mysql_service import mysql_service
+from mysql_utils import mysql_configure
+
+
+class MysqlServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+    self.configure(env)
+
+  def clean(self, env):
+    import params
+    env.set_params(params)
+    mysql_users.mysql_deluser()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mysql_configure()
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    mysql_service(daemon_name=status_params.daemon_name, action='status')
+
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..ff1fda4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.exceptions import Fail
+
+
+def mysql_service(daemon_name=None, action='start'):
+  # determine the pid file from /etc/my.cnf; fail fast if it is not declared
+  pid_file = None
+  with open('/etc/my.cnf') as cnf:
+    for line in cnf:
+      if line.strip().startswith('pid-file'):
+        pid_file = line.split('=')[1].strip()
+        break
+  if pid_file is None:
+    raise Fail("Unable to determine the MySQL pid file from /etc/my.cnf")
+  pid_expression = "`" + "cat " + pid_file + "`"
+  status_cmd = "ls " + pid_file + " >/dev/null 2>&1 && ps -p " + pid_expression + " >/dev/null 2>&1"
+  cmd = ('service', daemon_name, action)
+
+  if action == 'status':
+    Execute(status_cmd)
+  elif action == 'stop':
+    import params
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd,
+            sudo = True,
+    )
+  elif action == 'start':
+    import params   
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd,
+      sudo = True,
+    )
+    Execute(("chmod", "664", pid_file), sudo=True)
+
+
+
+

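The my.cnf scan in mysql_service() boils down to the following; run here against an in-memory sample instead of the real /etc/my.cnf:

    sample_cnf = """[mysqld]
    datadir=/var/lib/mysql
    pid-file=/var/run/mysqld/mysqld.pid
    """

    pid_file = None
    for line in sample_cnf.splitlines():
        if line.strip().startswith('pid-file'):
            pid_file = line.split('=')[1].strip()
            break

    print(pid_file)  # -> /var/run/mysqld/mysqld.pid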
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_users.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_users.py
new file mode 100644
index 0000000..c023548
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_users.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# Used to add hive access to the needed components
+def mysql_adduser():
+  import params
+  
+  File(params.mysql_adduser_path,
+       mode=0755,
+       content=StaticFile('addMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
+  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(add_hiveserver_cmd)
+  else:
+    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          logoutput=False,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+
+# Removes hive access from components
+def mysql_deluser():
+  import params
+  
+  File(params.mysql_deluser_path,
+       mode=0755,
+       content=StaticFile('removeMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
+  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(del_hiveserver_cmd)
+  else:
+    cmd = format(
+      del_hiveserver_cmd + ";" + del_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+  )
+

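The {hive_metastore_user_passwd!p} conversion above asks resource_management's format() to substitute the password while keeping it masked in logged command text. A rough sketch of that idea only (the real implementation differs, and would keep the true value for execution while masking only the logged copy):

    # Toy formatter: substitute values, but mask ones marked protected.
    def format_cmd(template, protected=(), **kwargs):
        shown = dict(kwargs)
        for key in protected:
            shown[key] = '[PROTECTED]'
        return template.format(**shown)

    cmd = "bash -x addMysqlUser.sh mysql {user} {passwd} {host}"
    print(format_cmd(cmd, protected=('passwd',), user="hive",
                     passwd="s3cret", host="c6401.ambari.apache.org"))
    # -> bash -x addMysqlUser.sh mysql hive [PROTECTED] c6401.ambari.apache.org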
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_utils.py
new file mode 100644
index 0000000..5006b56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/mysql_utils.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import mysql_users
+
+def mysql_configure():
+  import params
+
+  # required for running hive
+  replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
+  Execute(replace_bind_address,
+          sudo = True,
+  )
+  
+  # this also will start mysql-server
+  mysql_users.mysql_adduser()
+  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..f10a3f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)

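The default() helper used here (and throughout params_linux.py below) walks a /-separated path into the command JSON and returns a fallback when the path is absent. A simplified standalone sketch with an assumed config dict (the real helper reads the global command config rather than taking it as a parameter):

    def default(path, fallback, config):
        # Walk a "/a/b/c" style path into nested dicts.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {"hostLevelParams": {"host_sys_prepped": False}}
    print(default("/hostLevelParams/host_sys_prepped", True, config))      # -> False
    print(default("/commandParams/command_retry_enabled", False, config))  # -> False (fallback)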
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_linux.py
new file mode 100644
index 0000000..ea156c8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_linux.py
@@ -0,0 +1,535 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import status_params
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries import functions
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# node hostname
+hostname = config["hostname"]
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_is_21 = False
+
+# this is not available on INSTALL action because iop-select is not available
+iop_stack_version = functions.get_hdp_version('hive-server2')
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
+# downgrade_from_version provides the source-version the downgrade is happening from 
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+
+# Upgrade direction
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+component_directory = status_params.component_directory
+hadoop_bin_dir = "/usr/bin"
+hadoop_home = '/usr'
+hive_bin = '/usr/lib/hive/bin'
+hive_lib = '/usr/lib/hive/lib'
+
+# HBase params: keep the hbase lib here; if not, MapReduce jobs for Hive don't work.
+hbase_lib = '/usr/iop/current/hbase-client/lib'
+
+# Hadoop params
+hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_home = '/usr/iop/current/hadoop-client'
+hive_bin = format('/usr/iop/current/{component_directory}/bin')
+hive_lib = format('/usr/iop/current/{component_directory}/lib')
+hive_var_lib = '/var/lib/hive'
+
+# if this is a server action, then use the server binaries; smoke tests
+# use the client binaries
+command_role = default("/role", "")
+server_role_dir_mapping = { 'HIVE_SERVER' : 'hive-server2',
+  'HIVE_METASTORE' : 'hive-metastore' }
+
+if command_role in server_role_dir_mapping:
+  hive_server_root = server_role_dir_mapping[command_role]
+  hive_bin = format('/usr/iop/current/{hive_server_root}/bin')
+  hive_lib = format('/usr/iop/current/{hive_server_root}/lib')
+
+hive_specific_configs_supported = False
+hive_etc_dir_prefix = "/etc/hive"
+limits_conf_dir = "/etc/security/limits.d"
+hive_site_config = dict(config['configurations']['hive-site'])
+hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
+hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
+
+
+# use the directories from status_params as they are already calculated for
+# the correct version of BigInsights
+hadoop_conf_dir = status_params.hadoop_conf_dir
+hadoop_bin_dir = status_params.hadoop_bin_dir
+webhcat_conf_dir = status_params.webhcat_conf_dir
+hive_conf_dir = status_params.hive_conf_dir
+hive_config_dir = status_params.hive_config_dir
+hive_client_conf_dir = status_params.hive_client_conf_dir
+hive_server_conf_dir = status_params.hive_server_conf_dir
+
+hcat_conf_dir = '/etc/hive-hcatalog/conf'
+config_dir = '/etc/hive-webhcat/conf'
+hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
+webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
+
+# Starting from IOP 4.1.0.0 drop should be executed with purge suffix
+purge_tables = "false"
+if Script.is_hdp_stack_greater_or_equal("4.1.0.0"):
+  purge_tables = 'true'
+
+  # this is NOT a typo.  IOP 4.1 configs for hcatalog/webhcat point to a
+  # specific directory which is NOT called 'conf'
+  hcat_conf_dir = '/usr/iop/current/hive-webhcat/etc/hcatalog'
+  config_dir = '/usr/iop/current/hive-webhcat/etc/webhcat'
+
+component_directory = status_params.component_directory
+hadoop_home = '/usr/iop/current/hadoop-client'
+hive_bin = format('/usr/iop/current/{component_directory}/bin')
+hive_lib = format('/usr/iop/current/{component_directory}/lib')
+
+# there are no client versions of these, use server versions directly
+hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
+webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
+
+# --- Tarballs ---
+# DON'T CHANGE THESE VARIABLE NAMES
+# Values don't change from those in copy_tarball.py
+  
+hive_tar_source = "/usr/iop/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
+pig_tar_source = "/usr/iop/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
+hive_tar_dest_file = "/iop/apps/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
+pig_tar_dest_file = "/iop/apps/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
+
+hadoop_streaming_tar_source = "/usr/iop/{0}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_VERSION_PATTERN)
+sqoop_tar_source = "/usr/iop/{0}/sqoop/sqoop.tar.gz".format(STACK_VERSION_PATTERN)
+hadoop_streaming_tar_dest_dir = "/iop/apps/{0}/mapreduce/".format(STACK_VERSION_PATTERN)
+sqoop_tar_dest_dir = "/iop/apps/{0}/sqoop/".format(STACK_VERSION_PATTERN)
+
+tarballs_mode = 0444
+
+if Script.is_hdp_stack_greater_or_equal("4.2.0.0"):
+  # the flag must be set back to False when downgrading from 4.2 to 4.1
+  if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_hdp_stack_version(version), '4.2.0.0') < 0:
+    hive_specific_configs_supported = False
+  else:
+    # either an upgrade or a fresh install of 4.2
+    hive_specific_configs_supported = True
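+  # e.g. a DOWNGRADE with version = "4.1.0.0" satisfies the compare_versions() branch above
+  # and the flag stays False; an upgrade or fresh 4.2 install takes the else branch instead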
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+#HACK Temporarily use dbType=azuredb while invoking schematool
+if hive_metastore_db_type == "mssql":
+  hive_metastore_db_type = "azuredb"
+
+#users
+hive_user = config['configurations']['hive-env']['hive_user']
+#JDBC driver jar name
+hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
+# NOTE: keying off the JDBC driver class name is fragile, since class paths can change;
+# using the configured database type directly would be more robust.
+sqla_db_used = False
+if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+  jdbc_jar_name = "sqljdbc4.jar"
+  jdbc_symlink_name = "mssql-jdbc-driver.jar"
+elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+elif hive_jdbc_driver == "org.postgresql.Driver":
+  jdbc_jar_name = "postgresql-jdbc.jar"
+  jdbc_symlink_name = "postgres-jdbc-driver.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
+  jdbc_jar_name = "sajdbc4.jar"
+  jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
+  sqla_db_used = True
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
+                          "org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+prepackaged_ojdbc_symlink = format("{hive_lib}/ojdbc6.jar")
+templeton_port = config['configurations']['webhcat-site']['templeton.port']
+
+#constants for type2 jdbc
+jdbc_libs_dir = format("{hive_lib}/native/lib64")
+lib_dir_available = os.path.exists(jdbc_libs_dir)
+
+if sqla_db_used:
+  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  downloaded_custom_connector = format("{tmp_dir}/sqla-client-jdbc.tar.gz")
+  libs_in_hive_lib = format("{jdbc_libs_dir}/*")
+
+#common
+hive_metastore_hosts = config['clusterHostInfo']['hive_metastore_host']
+hive_metastore_host = hive_metastore_hosts[0]
+hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
+hive_server_hosts = config['clusterHostInfo']['hive_server_host']
+hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+
+if hive_transport_mode.lower() == "http":
+  hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
+else:
+  hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+
+hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
+hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
+
+# ssl options
+hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
+hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
+hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
+smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+
+#hive_env
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#Default conf dir for client
+hive_conf_dirs_list = [hive_client_conf_dir]
+
+if hostname in hive_metastore_hosts or hostname in hive_server_hosts:
+  hive_conf_dirs_list.append(hive_server_conf_dir)
+
+#hive-site
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
+hive_database = config['configurations']['hive-env']['hive_database']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh.j2'
+
+#Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+user_group = config['configurations']['cluster-env']['user_group']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+# Need this for yarn.nodemanager.recovery.dir in yarn-site
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+jars_in_hive_lib = format("{hive_lib}/*.jar")
+
+
+# Normally the JDBC driver would be referenced via /usr/iop/current/.../foo.jar, but during a
+# rolling upgrade, if the 'current' pointer has already been moved and the restart then fails,
+# it points at the upgraded version's location, which breaks the cp command; reference the
+# explicit version instead.
+source_jdbc_file = format("/usr/iop/{current_version}/hive/lib/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+
+start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
+start_metastore_path = format("{tmp_dir}/start_metastore_script")
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+
+if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+  hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
+else:
+  hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
+
+hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
+
+java64_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+##### MYSQL
+db_name = config['configurations']['hive-env']['hive_database_name']
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
+mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
+
+#### Metastore
+# initialize the schema only if not in an upgrade/downgrade
+init_metastore_schema = upgrade_direction is None
+
+########## HCAT
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
+
+#hive-log4j.properties.template
+if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
+  log4j_props = config['configurations']['hive-log4j']['content']
+else:
+  log4j_props = None
+
+#webhcat-log4j.properties.template
+if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
+  log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
+else:
+  log4j_webhcat_props = None
+
+#hive-exec-log4j.properties.template
+if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
+else:
+  log4j_exec_props = None
+
+daemon_name = status_params.daemon_name
+process_name = status_params.process_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
+
+hive_hdfs_user_dir = format("/user/{hive_user}")
+hive_hdfs_user_mode = 0700
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
+hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+
+# Tez-related properties
+tez_user = None #config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = None #'/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = None #'/usr/lib/tez/lib/*.jar'
+
+# Tez libraries
+tez_lib_uris = None #default("/configurations/tez-site/tez.lib.uris", None)
+
+if OSCheck.is_ubuntu_family():
+  mysql_configname = '/etc/mysql/my.cnf'
+else:
+  mysql_configname = '/etc/my.cnf'
+
+mysql_user = 'mysql'
+
+# Hive security
+hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
+
+mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+hive_use_existing_db = hive_database.startswith('Existing')
+hive_exclude_packages = []
+
+# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
+# trying to install mysql-connector-java upon them can cause packages to conflict.
+if hive_use_existing_db:
+  hive_exclude_packages = ['mysql-connector-java', 'mysql', 'mysql-server',
+                           'mysql-community-release', 'mysql-community-server']
+else:
+  if 'role' in config and config['role'] != "MYSQL_SERVER":
+    hive_exclude_packages = ['mysql', 'mysql-server', 'mysql-community-release',
+                             'mysql-community-server']
+  if os.path.exists(mysql_jdbc_driver_jar):
+    hive_exclude_packages.append('mysql-connector-java')
+
+
+
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
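+# call sites then pass only the varying arguments, e.g.:
+#   params.HdfsResource("/user/hive", type="directory", action="create_on_execute", owner=hive_user)
+#   params.HdfsResource(None, action="execute")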
+
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+
+#ranger hive properties
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
+xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hive'
+
+jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
+common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
+
+repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+ranger_env = config['configurations']['ranger-env']
+ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
+policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
+
+if security_enabled:
+  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
+
+#For curl command in ranger plugin to get db connector
+if has_ranger_admin:
+  enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
+  repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+
+  if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
+    ranger_jdbc_symlink_name = "mysql-jdbc-driver.jar"
+    ranger_jdbc_jar_name = "mysql-connector-java.jar"
+    audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+    jdbc_driver = "com.mysql.jdbc.Driver"
+  elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
+    ranger_jdbc_jar_name = "ojdbc6.jar"
+    ranger_jdbc_symlink_name = "oracle-jdbc-driver.jar"
+    colon_count = xa_db_host.count(':')
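+    # zero or two colons ("host" or "host:port:SID") take the thin:@ form below;
+    # exactly one colon ("host:port") takes the thin:@// form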
+    if colon_count == 2 or colon_count == 0:
+      audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+    else:
+      audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+    jdbc_driver = "oracle.jdbc.OracleDriver"
+  elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
+    ranger_jdbc_jar_name = "postgresql.jar"
+    ranger_jdbc_symlink_name = "postgres-jdbc-driver.jar"
+    audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+    jdbc_driver = "org.postgresql.Driver"
+  elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
+    ranger_jdbc_jar_name = "sqljdbc4.jar"
+    ranger_jdbc_symlink_name = "mssql-jdbc-driver.jar"
+    audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+    jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+
+  ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
+
+  ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_symlink_name}")
+  ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
+
+  hive_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'jdbc.driverClassName': jdbc_driver_class_name,
+    'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  hive_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hive_ranger_plugin_config),
+    'description': 'hive repo',
+    'name': repo_name,
+    'repositoryType': 'hive',
+    'assetType': '3'
+  }
+
+  xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+  xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+  ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+  ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+

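The HdfsResource partial above is worth a note: functools.partial pre-binds the keyword
arguments shared by every HDFS call, so the service scripts only pass what varies per call.
A minimal, self-contained sketch of the same idiom (the resource function, keytab path and
principal below are stand-ins for illustration, not the real resource_management API):

import functools

def hdfs_resource(path, user=None, keytab=None, principal_name=None, action=None):
  # stand-in resource: just report what a real HdfsResource call would act on
  print("HdfsResource %s as user=%s keytab=%s principal=%s action=%s"
        % (path, user, keytab, principal_name, action))

# pre-bind the arguments that are identical for every call, as params.py does above
HdfsResource = functools.partial(
  hdfs_resource,
  user="hdfs",
  keytab="/etc/security/keytabs/hdfs.headless.keytab",  # assumed path, illustration only
  principal_name="hdfs@EXAMPLE.COM",                    # assumed principal, illustration only
)

# call sites then supply only the varying pieces
HdfsResource("/user/hive", action="create_on_execute")
HdfsResource("/apps/hive/warehouse", action="create_on_execute")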
http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_windows.py
new file mode 100644
index 0000000..f2524b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/params_windows.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+
+hdp_root = None
+hive_conf_dir = None
+hive_home = None
+hive_lib_dir = None
+hive_log_dir = None
+hive_opts = None
+hcat_home = None
+hcat_config_dir = None
+hive_bin = None
+
+try:
+  # these environment variables are expected to be set on Windows hosts;
+  # fall back to the None defaults above if any of them are missing
+  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+  hive_conf_dir = os.environ["HIVE_CONF_DIR"]
+  hive_home = os.environ["HIVE_HOME"]
+  hive_lib_dir = os.environ["HIVE_LIB_DIR"]
+  hive_log_dir = os.environ["HIVE_LOG_DIR"]
+  hive_opts = os.environ["HIVE_OPTS"]
+  hcat_home = os.environ["HCAT_HOME"]
+  hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
+  hive_bin = os.path.join(hive_home, "bin")
+except KeyError:
+  pass
+
+hive_env_sh_template = config['configurations']['hive-env']['content']
+hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hive_user = hadoop_user
+hcat_user = hadoop_user
+
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+hive_exclude_packages = []
+
+hive_execution_engine = config["configurations"]["hive-site"]["hive.execution.engine"]
+
+######## Metastore Schema
+init_metastore_schema = not config['configurations']['hive-site']['datanucleus.autoCreateSchema']
+
+service_map = {
+  "metastore" : hive_metastore_win_service_name,
+  "client" : hive_client_win_service_name,
+  "hiveserver2" : hive_server_win_service_name,
+  "templeton" : webhcat_server_win_service_name
+}
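
service_map above lets the Windows scripts translate a Hive component name into the
registered Windows service before delegating to the OS service controller. A rough sketch
of that dispatch; the service names here are hypothetical stand-ins for the ones
status_params provides:

import subprocess

service_map = {
  "metastore": "hive-metastore-svc",   # hypothetical name, illustration only
  "hiveserver2": "hiveserver2-svc",    # hypothetical name, illustration only
}

def hive_service(component, action="start"):
  # resolve the Windows service registered for this component, then act on it
  win_service_name = service_map[component]
  subprocess.check_call(["sc", action, win_service_name])

# usage: hive_service("metastore", "start")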

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_server.py
new file mode 100644
index 0000000..a1cd13f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_server.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from postgresql_service import postgresql_service
+
+class PostgreSQLServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    # init the database; '|| :' makes the command return 0 even when the database has
+    # already been initialized (e.g. when the PostgreSQL server is colocated with the Ambari server)
+    Execute(format("service {postgresql_daemon_name} initdb || :"))
+
+    # update the configuration files
+    self.update_pghda_conf(env)
+    self.update_postgresql_conf(env)
+
+    # restart the postgresql server for the changes to take effect
+    self.stop(env)
+    self.start(env)
+
+    # create the database and hive_metastore_user
+    File(params.postgresql_adduser_path,
+         mode=0755,
+         content=StaticFile(format("{postgresql_adduser_file}"))
+    )
+
+    cmd = format("bash -x {postgresql_adduser_path} {postgresql_daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {db_name}")
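+    # the !p conversion marks the value as a password so format() masks it in logged output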
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    )
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'stop')
+
+  def status(self, env):
+    import status_params
+    postgresql_service(postgresql_daemon_name=status_params.postgresql_daemon_name, action = 'status')
+
+  def update_postgresql_conf(self, env):
+    import params
+    env.set_params(params)
+
+    # change listen_addresses to '*'
+    Execute(format("sed -i '/^[[:space:]]*listen_addresses[[:space:]]*=.*/d' {postgresql_conf_path}"))
+    Execute(format("echo \"listen_addresses = '*'\" | tee -a {postgresql_conf_path}"))
+
+    # change standard_conforming_strings to off
+    Execute(format("sed -i '/^[[:space:]]*standard_conforming_strings[[:space:]]*=.*/d' {postgresql_conf_path}"))
+    Execute(format("echo \"standard_conforming_strings = off\" | tee -a {postgresql_conf_path}"))
+
+  def update_pghda_conf(self, env):
+    import params
+    env.set_params(params)
+
+    # trust hive_metastore_user and postgres locally
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"local   all   postgres   trust\" | tee -a {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"local   all   \\\"{hive_metastore_user_name}\\\" trust\" | tee -a {postgresql_pghba_conf_path}"))
+
+    # trust hive_metastore_user and postgres via local interface
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"host    all   postgres         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"host    all   \\\"{hive_metastore_user_name}\\\"         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
+
+if __name__ == "__main__":
+  PostgreSQLServer().execute()
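
The sed-then-tee pairs in update_postgresql_conf and update_pghda_conf implement an
idempotent "delete any old setting, then append the desired one" edit, so re-running
configure never duplicates lines. A pure-Python sketch of the same pattern (the path and
setting below are illustrative):

import re

def set_conf_line(path, pattern, line):
  # drop every existing line matching the pattern, then append the desired line,
  # so repeated runs converge on exactly one copy of the setting
  with open(path) as conf:
    kept = [l for l in conf if not re.match(pattern, l)]
  kept.append(line + "\n")
  with open(path, "w") as conf:
    conf.writelines(kept)

# usage: set_conf_line("/var/lib/pgsql/data/postgresql.conf",
#                      r"^\s*listen_addresses\s*=", "listen_addresses = '*'")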

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_service.py
new file mode 100644
index 0000000..6443e05
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/postgresql_service.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def postgresql_service(postgresql_daemon_name=None, action='start'):
+  status_cmd = format('service {postgresql_daemon_name} status | grep running')
+  cmd = format('service {postgresql_daemon_name} {action}')
+
+  if action == 'status':
+    Execute(status_cmd)
+  elif action == 'stop':
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd
+    )
+  elif action == 'start':
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd
+    )
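
The not_if/only_if guards above are what make start and stop safe to re-run: the action is
skipped when the status probe reports the daemon is already in the desired state. A minimal
sketch of those semantics using plain subprocess (the service name is illustrative):

import subprocess

def guarded_execute(cmd, not_if=None, only_if=None):
  # skip when the not_if guard succeeds, or when the only_if guard fails,
  # mirroring the Execute(not_if=..., only_if=...) semantics used above
  if not_if and subprocess.call(not_if, shell=True) == 0:
    return
  if only_if and subprocess.call(only_if, shell=True) != 0:
    return
  subprocess.check_call(cmd, shell=True)

status_cmd = "service postgresql status | grep running"
# guarded_execute("service postgresql start", not_if=status_cmd)   # no-op if already running
# guarded_execute("service postgresql stop", only_if=status_cmd)   # no-op if already stopped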

http://git-wip-us.apache.org/repos/asf/ambari/blob/66984d9a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..9b95d58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import socket
+import sys
+import time
+from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    if params.hive_server2_authentication in ("KERBEROS", "NONE"):
+
+      address_list = params.hive_server_hosts
+
+      if not address_list:
+        raise Fail("Cannot find any Hive Server host. Please check the configuration.")
+
+      port = int(format("{hive_server_port}"))
+      print "Test connectivity to hive server"
+      if params.security_enabled:
+        kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+      else:
+        kinitcmd=None
+
+      SOCKET_WAIT_SECONDS = 290
+
+      start_time = time.time()
+      end_time = start_time + SOCKET_WAIT_SECONDS
+
+      print "Waiting for the Hive server to start..."
+      
+      workable_server_available = False
+      i = 0
+      while time.time() < end_time and not workable_server_available:
+        address = address_list[i]
+        try:
+          check_thrift_port_sasl(address, port, params.hive_server2_authentication,
+                                 params.hive_server_principal, kinitcmd, params.smokeuser,
+                                 transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
+                                 ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
+                                 ssl_password=params.hive_ssl_keystore_password)
+          print "Successfully connected to %s on port %s" % (address, port)
+          workable_server_available = True
+        except Exception:
+          print "Connection to %s on port %s failed" % (address, port)
+          time.sleep(5)
+        
+        i += 1
+        if i == len(address_list):
+          i = 0
+          
+      elapsed_time = time.time() - start_time
+    
+      if not workable_server_available:
+        raise Fail("Connection to Hive server %s on port %s failed after %d seconds" %
+                   (params.hostname, params.hive_server_port, elapsed_time))
+    
+      print "Successfully connected to Hive at %s on port %s after %d seconds" %\
+            (params.hostname, params.hive_server_port, elapsed_time)
+
+    hcat_service_check()
+    webhcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
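
The connectivity loop above is a deadline-bounded round-robin: it cycles through the
HiveServer2 hosts, retrying each in turn until one answers or the overall wait expires.
The same shape reduced to a standalone sketch (host names and the Thrift port are
illustrative):

import socket
import time

def first_reachable(hosts, port, wait_seconds=290, pause=5):
  # cycle through the candidate hosts until one accepts a TCP connection
  # or the deadline expires; return the winning host, or None on timeout
  deadline = time.time() + wait_seconds
  i = 0
  while time.time() < deadline:
    host = hosts[i % len(hosts)]
    try:
      socket.create_connection((host, port), timeout=5).close()
      return host
    except Exception:
      time.sleep(pause)
    i += 1
  return None

# usage: first_reachable(["hs2-a.example.com", "hs2-b.example.com"], 10000)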

