ambari-commits mailing list archives

From vbrodets...@apache.org
Subject [04/12] ambari git commit: AMBARI-19832. HDP 3.0 support for Hive with configs, kerberos, widgets, metrics, quicklinks, and themes. (vbrodetskyi)
Date Fri, 24 Mar 2017 12:51:52 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveTezSetup.cmd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveTezSetup.cmd b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveTezSetup.cmd
new file mode 100644
index 0000000..10d6a1c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveTezSetup.cmd
@@ -0,0 +1,60 @@
+@echo off
+setlocal EnableDelayedExpansion
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+if not defined HADOOP_HOME (
+  set EXITCODE=5
+  goto :errorexit
+)
+if not defined HIVE_HOME (
+  set EXITCODE=6
+  goto :errorexit
+)
+if not defined TEZ_HOME (
+  set EXITCODE=7
+  goto :errorexit
+)
+
+set EXITCODE=0
+
+rem Delayed expansion (!VAR!) is needed because %VAR% in a parenthesized block expands at parse time.
+if not exist %HIVE_HOME%\conf\hive-tez-configured (
+  %HADOOP_HOME%\bin\hadoop.cmd fs -mkdir /apps/tez
+  set EXITCODE=!ERRORLEVEL!
+  if !EXITCODE! neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -chmod -R 755 /apps/tez
+  set EXITCODE=!ERRORLEVEL!
+  if !EXITCODE! neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -chown -R hadoop:users /apps/tez
+  set EXITCODE=!ERRORLEVEL!
+  if !EXITCODE! neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -put %TEZ_HOME%\* /apps/tez
+  set EXITCODE=!ERRORLEVEL!
+  if !EXITCODE! neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -rm -r -skipTrash /apps/tez/conf
+  set EXITCODE=!ERRORLEVEL!
+  if !EXITCODE! neq 0 goto :errorexit
+
+  echo done > %HIVE_HOME%\conf\hive-tez-configured
+)
+goto :eof
+
+:errorexit
+exit /B %EXITCODE%

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2.sql b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..77d7b3e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
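+# Usage: hiveserver2Smoke.sh <jdbc-url> <sql-script>
+# $1 is the beeline connection URL; $2 is a SQL file executed via beeline's !run command.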
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/pigSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/removeMysqlUser.sh b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/removeMysqlUser.sh
new file mode 100644
index 0000000..7b6d331
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/removeMysqlUser.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
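+# Usage: removeMysqlUser.sh <mysqld-service-name> <db-username> <user-host>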
+mysqldservice=$1
+mysqldbuser=$2
+userhost=$3
+myhostname=$(hostname -f)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+
+$sudo_prefix service $mysqldservice start
+echo "Removing user $mysqldbuser@$userhost"
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+$sudo_prefix service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/startMetastore.sh b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/startMetastore.sh
new file mode 100644
index 0000000..86541f0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/startMetastore.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
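+# Usage: startMetastore.sh <stdout-file> <stderr-file> <pid-file> <hive-conf-dir> <log-dir>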
+HIVE_BIN=${HIVE_BIN:-"hive"}
+
+HIVE_CONF_DIR=$4 $HIVE_BIN --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
+echo $! > $3

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..f5a0da1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/files/templetonSmoke.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export templeton_port=$3
+export ttonTestScript=$4
+export has_pig=$5
+export smoke_user_keytab=$6
+export security_enabled=$7
+export kinit_path_local=$8
+export smokeuser_principal=$9
+export tmp_dir=${10}
+export ttonurl="http://${ttonhost}:${templeton_port}/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+# Retry with an explicit user.name parameter, required on 2.3+ stacks.
+if [[ "$httpExitCode" == "500" ]] ; then
+  cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status?user.name=$smoke_test_user 2>&1"
+  retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+  httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+fi
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+#try hcat ddl command
+/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/show_db.post.txt
+echo "user.name=${smoke_test_user}&exec=show databases;" > ${tmp_dir}/show_db.post.txt
+/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+# Pig smoke tests are skipped entirely in secure mode.
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+if [[ $has_pig != "True" ]]; then
+  echo "Templeton Pig Smoke Tests are not run, because Pig is not installed"
+  exit 0
+fi
+
+#try pig query
+
+#create, copy post args file
+/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/pig_post.txt
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > ${tmp_dir}/pig_post.txt
+/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/pig_post.txt
+
+#submit pig query
+cmd="curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
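
Note: the smoke script recovers the HTTP status from curl's -w 'http_code <%{http_code}>' trailer with a sed expression. A minimal Python sketch of the same extraction, for illustration only (the sample_output value below is hypothetical):

    import re

    def extract_http_code(curl_output):
        # curl appends "http_code <NNN>" to the response body, per the -w format.
        match = re.search(r'http_code <(\d+)>', curl_output)
        return int(match.group(1)) if match else None

    sample_output = '{"status":"ok"}http_code <200>'  # hypothetical curl output
    assert extract_http_code(sample_output) == 200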

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat.py
new file mode 100644
index 0000000..faa8c7e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+# Python Imports
+import os
+import sys
+
+# Local Imports
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Directory, File
+from resource_management.core.source import InlineTemplate
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons import OSConst
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hcat():
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat():
+  import params
+
+  Directory(params.hive_conf_dir,
+            create_parents = True,
+            owner=params.hive_user,
+            group=params.user_group,
+  )
+
+
+  Directory(params.hcat_conf_dir,
+            create_parents = True,
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            create_parents = True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_client_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
+  )
+
+  # Generate atlas-application.properties.xml file
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
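
Note: hcat.py defines hcat() twice, once per OS family; the @OsFamilyFuncImpl decorator registers each implementation and picks one at call time based on the detected family, with OsFamilyImpl.DEFAULT as the fallback. A rough, self-contained sketch of how such registry-based dispatch can work (simplified; not Ambari's actual implementation, and detect_os_family is a stand-in):

    import platform

    _registry = {}

    def os_family_func_impl(os_family):
        """Register an implementation of a function for one OS family."""
        def decorator(func):
            impls = _registry.setdefault(func.__name__, {})
            impls[os_family] = func

            def dispatch(*args, **kwargs):
                # Prefer an exact family match, else fall back to the default.
                impl = impls.get(detect_os_family(), impls.get('default'))
                return impl(*args, **kwargs)
            return dispatch
        return decorator

    def detect_os_family():
        # Stand-in for Ambari's real OS detection.
        return 'winsrv' if platform.system() == 'Windows' else 'default'

    @os_family_func_impl('winsrv')
    def configure():
        return 'configuring for Windows'

    @os_family_func_impl('default')
    def configure():
        return 'configuring for Linux and friends'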

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_client.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_client.py
new file mode 100644
index 0000000..47bbc41
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_client.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from hcat import hcat
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script.script import Script
+
+
+class HCatClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hcat()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HCatClientWindows(HCatClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HCatClientDefault(HCatClient):
+  def get_component_name(self):
+    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    return "hive-webhcat"
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Execute <stack-selector-tool> before reconfiguring this client to the new stack version.
+
+    :param env:
+    :param upgrade_type:
+    :return:
+    """
+    Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the stack version does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    stack_select.select("hive-webhcat", params.version)
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..25d0c81
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hcat_service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.core.source import StaticFile
+from resource_management.core.resources.system import Execute, File
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hcat_service_check():
+  import params
+  smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+  service = "HCatalog"
+  Execute(format("cmd /C {smoke_cmd} {service}"), user=params.webhcat_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    else:
+      kinit_cmd = ""
+
+    File(format("{tmp_dir}/hcatSmoke.sh"),
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare {purge_tables}")
+
+    exec_path = params.execute_path
+    if params.version and params.stack_root:
+      upgrade_hive_bin = format("{stack_root}/{version}/hive/bin")
+      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)
+
+    if params.security_enabled:
+      Execute (format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+               user = params.hdfs_user,
+      )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  bin_dir=params.execute_path
+    )
+
+    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup {purge_tables}")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive.py
new file mode 100644
index 0000000..b7b04a2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive.py
@@ -0,0 +1,520 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import glob
+from urlparse import urlparse
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.get_config import get_config
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_sudo
+from resource_management.core.shell import quote_bash_args
+from resource_management.core.logger import Logger
+from resource_management.core import utils
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from ambari_commons.constants import SERVICE
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive(name=None):
+  import params
+
+  hive_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
+  # Permissions 644 for conf dir (client) files, and 600 for conf.server
+  mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600
+
+  Directory(params.hive_etc_dir_prefix,
+            mode=0755
+  )
+
+  # Change configurations for the client as well as the server, because
+  # stale-config detection is service-level, not component-level.
+  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  params.hive_site_config = update_credential_provider_path(params.hive_site_config,
+                                                     'hive-site',
+                                                     os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
+                                                     params.hive_user,
+                                                     params.user_group
+                                                     )
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.hive_site_config,
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=mode_identified)
+
+  # Generate atlas-application.properties.xml file
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
+  
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template),
+       mode=mode_identified
+  )
+
+  # On some operating systems this directory may not exist, so create it before writing files into it.
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+
+  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hive.conf.j2")
+       )
+  if params.security_enabled:
+    File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
+         owner=params.hive_user,
+         group=params.user_group,
+         content=Template("zkmigrator_jaas.conf.j2")
+         )
+
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+       mode = 0644,
+  )
+
+  if name != "client":
+    setup_non_client()
+  if name == 'hiveserver2':
+    setup_hiveserver2()
+  if name == 'metastore':
+    setup_metastore()
+
+def setup_hiveserver2():
+  import params
+  
+  File(params.start_hiveserver2_path,
+       mode=0755,
+       content=Template(format('{start_hiveserver2_script}'))
+  )
+
+  File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
+       mode=0600
+  )
+  XmlConfig("hiveserver2-site.xml",
+            conf_dir=params.hive_server_conf_dir,
+            configurations=params.config['configurations']['hiveserver2-site'],
+            configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0600)
+  
+  # copy tarball to HDFS feature not supported
+  if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):  
+    params.HdfsResource(params.webhcat_apps_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.webhcat_user,
+                          mode=0755
+                        )
+  
+  # Create webhcat dirs.
+  if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+    params.HdfsResource(params.hcat_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.webhcat_user,
+                         mode=params.hcat_hdfs_user_mode
+    )
+
+  params.HdfsResource(params.webhcat_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.webhcat_user,
+                       mode=params.webhcat_hdfs_user_mode
+  )
+
+  # ****** Begin Copy Tarballs ******
+  # *********************************
+  #  if copy tarball to HDFS feature  supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
+  if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+    copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+    copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+
+  # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
+  # This can use a different source and destination location to account for custom tarball paths.
+  copy_to_hdfs("pig",
+               params.user_group,
+               params.hdfs_user,
+               file_mode=params.tarballs_mode,
+               custom_source_file=params.pig_tar_source,
+               custom_dest_file=params.pig_tar_dest_file,
+               skip=params.sysprep_skip_copy_tarballs_hdfs)
+  copy_to_hdfs("hive",
+               params.user_group,
+               params.hdfs_user,
+               file_mode=params.tarballs_mode,
+               custom_source_file=params.hive_tar_source,
+               custom_dest_file=params.hive_tar_dest_file,
+               skip=params.sysprep_skip_copy_tarballs_hdfs)
+
+  wildcard_tarballs = ["sqoop", "hadoop_streaming"]
+  for tarball_name in wildcard_tarballs:
+    source_file_pattern = getattr(params, tarball_name + "_tar_source")
+    dest_dir = getattr(params, tarball_name + "_tar_dest_dir")
+
+    if source_file_pattern is None or dest_dir is None:
+      continue
+
+    source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
+    for source_file in source_files:
+      src_filename = os.path.basename(source_file)
+      dest_file = os.path.join(dest_dir, src_filename)
+
+      copy_to_hdfs(tarball_name,
+                   params.user_group,
+                   params.hdfs_user,
+                   file_mode=params.tarballs_mode,
+                   custom_source_file=source_file,
+                   custom_dest_file=dest_file,
+                   skip=params.sysprep_skip_copy_tarballs_hdfs)
+  # ******* End Copy Tarballs *******
+  # *********************************
+  
+  # if warehouse directory is in DFS
+  if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
+    # Create Hive Metastore Warehouse Dir
+    params.HdfsResource(params.hive_apps_whs_dir,
+                         type="directory",
+                          action="create_on_execute",
+                          owner=params.hive_user,
+                          mode=0777
+    )
+  else:
+    Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
+
+  # Create Hive User Dir
+  params.HdfsResource(params.hive_hdfs_user_dir,
+                       type="directory",
+                        action="create_on_execute",
+                        owner=params.hive_user,
+                        mode=params.hive_hdfs_user_mode
+  )
+  
+  if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
+    params.HdfsResource(params.hive_exec_scratchdir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hive_user,
+                         group=params.hdfs_user,
+                         mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
+    
+  params.HdfsResource(None, action="execute")
+  
+def setup_non_client():
+  import params
+  
+  Directory(params.hive_pid_dir,
+            create_parents = True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_log_dir,
+            create_parents = True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_var_lib,
+            create_parents = True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+  if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
+    jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
+  if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
+    jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
+    
+def setup_metastore():
+  import params
+  
+  if params.hive_metastore_site_supported:
+    hivemetastore_site_config = get_config("hivemetastore-site")
+    if hivemetastore_site_config:
+      XmlConfig("hivemetastore-site.xml",
+                conf_dir=params.hive_server_conf_dir,
+                configurations=params.config['configurations']['hivemetastore-site'],
+                configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
+                owner=params.hive_user,
+                group=params.user_group,
+                mode=0600)
+  
+  File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
+       mode=0600
+  )
+
+  File(params.start_metastore_path,
+       mode=0755,
+       content=StaticFile('startMetastore.sh')
+  )
+
+def create_metastore_schema():
+  import params
+
+  create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                             "{hive_schematool_bin}/schematool -initSchema "
+                             "-dbType {hive_metastore_db_type} "
+                             "-userName {hive_metastore_user_name} "
+                             "-passWord {hive_metastore_user_passwd!p} -verbose")
+
+  check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                    "{hive_schematool_bin}/schematool -info "
+                                    "-dbType {hive_metastore_db_type} "
+                                    "-userName {hive_metastore_user_name} "
+                                    "-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
+
+  # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
+  # Fixing it with the hack below:
+  quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
+  if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
+      or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
+    quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
+  Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
+      format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
+
+  Execute(create_schema_cmd,
+          not_if = check_schema_created_cmd,
+          user = params.hive_user
+  )
+    
+"""
+Writes configuration files required by Hive.
+"""
+def fill_conf_dir(component_conf_dir):
+  import params
+  hive_client_conf_path = os.path.realpath(format("{stack_root}/current/{component_directory}/conf"))
+  component_conf_dir = os.path.realpath(component_conf_dir)
+  mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
+  mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
+  Directory(component_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            create_parents = True,
+            mode=mode_identified_for_dir
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=component_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=mode_identified_for_file)
+
+
+  File(format("{component_conf_dir}/hive-default.xml.template"),
+       owner=params.hive_user,
+       group=params.user_group,
+       mode=mode_identified_for_file
+  )
+
+  File(format("{component_conf_dir}/hive-env.sh.template"),
+       owner=params.hive_user,
+       group=params.user_group,
+       mode=mode_identified_for_file
+  )
+
+  # Create hive-log4j.properties and hive-exec-log4j.properties
+  # in /etc/hive/conf and not in /etc/hive2/conf
+  if params.log4j_version == '1':
+    log4j_exec_filename = 'hive-exec-log4j.properties'
+    if params.log4j_exec_props is not None:
+      File(format("{component_conf_dir}/{log4j_exec_filename}"),
+           mode=mode_identified_for_file,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.log4j_exec_props)
+      )
+    elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
+      File(format("{component_conf_dir}/{log4j_exec_filename}"),
+           mode=mode_identified_for_file,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
+      )
+
+    log4j_filename = 'hive-log4j.properties'
+    if params.log4j_props is not None:
+      File(format("{component_conf_dir}/{log4j_filename}"),
+           mode=mode_identified_for_file,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.log4j_props)
+      )
+    elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
+      File(format("{component_conf_dir}/{log4j_filename}"),
+           mode=mode_identified_for_file,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
+      )
+    pass # if params.log4j_version == '1'
+
+
+def jdbc_connector(target, hive_previous_jdbc_jar):
+  """
+  Shared by Hive Batch, Hive Metastore, and Hive Interactive
+  :param target: Target of jdbc jar name, which could be for any of the components above.
+  """
+  import params
+
+  if not params.jdbc_jar_name:
+    return
+
+  if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+
+    if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
+      File(hive_previous_jdbc_jar, action='delete')
+
+    # TODO: remove once ranger_hive_plugin no longer provides the JDBC driver
+    if params.prepackaged_jdbc_name != params.jdbc_jar_name:
+      Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
+              path=["/bin", "/usr/bin/"],
+              sudo = True)
+    
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source))
+
+    # TODO: it may be more correct to key this off the database type
+    if params.sqla_db_used:
+      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
+
+      Execute(untar_sqla_type2_driver, sudo = True)
+
+      Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
+
+      Directory(params.jdbc_libs_dir,
+                create_parents = True)
+
+      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
+
+      Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
+
+    else:
+      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
+            #creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+  else:
+    # for the default Hive DB (MySQL)
+    Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
+            #creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo=True
+    )
+  pass
+
+  File(target,
+       mode = 0644,
+  )
+  
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive(name=None):
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+  if name in ["hiveserver2","metastore"]:
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hive_user,
+                  password = Script.get_password(params.hive_user))
+    Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
+
+  if name == 'metastore':
+    if params.init_metastore_schema:
+      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
+                                        '-dbType {hive_metastore_db_type} '
+                                        '-userName {hive_metastore_user_name} '
+                                        '-passWord {hive_metastore_user_passwd!p}'
+                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
+                                        hive_bin=params.hive_bin,
+                                        hive_metastore_db_type=params.hive_metastore_db_type,
+                                        hive_metastore_user_name=params.hive_metastore_user_name,
+                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+      try:
+        Execute(check_schema_created_cmd)
+      except Fail:
+        create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
+                                   '-dbType {hive_metastore_db_type} '
+                                   '-userName {hive_metastore_user_name} '
+                                   '-passWord {hive_metastore_user_passwd!p}',
+                                   hive_bin=params.hive_bin,
+                                   hive_metastore_db_type=params.hive_metastore_db_type,
+                                   hive_metastore_user_name=params.hive_metastore_user_name,
+                                   hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+        Execute(create_schema_cmd,
+                user = params.hive_user,
+                logoutput=True
+        )
+
+  if name == "hiveserver2":
+    if params.hive_execution_engine == "tez":
+      # Init the tez app dir in hadoop
+      script_file = __file__.replace('/', os.sep)
+      cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
+
+      Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_client.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_client.py
new file mode 100644
index 0000000..83d82df
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_client.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+from hive import hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HiveClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='client')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveClientWindows(HiveClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveClientDefault(HiveClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Hive client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hive", params.version)
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_interactive.py
new file mode 100644
index 0000000..2ed3e3a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_interactive.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+# Python Imports
+import os
+import glob
+from urlparse import urlparse
+
+# Resource Management and Common Imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Directory
+from resource_management.core.source import Template, DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import as_sudo
+from resource_management.core.shell import quote_bash_args
+from resource_management.core.logger import Logger
+from resource_management.core import utils
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from ambari_commons.constants import SERVICE
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from hive import fill_conf_dir, jdbc_connector
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_interactive(name=None):
+  pass
+
+"""
+Sets up the configs, jdbc connection and tarball copy to HDFS for Hive Server Interactive.
+"""
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_interactive(name=None):
+  import params
+  MB_TO_BYTES = 1048576  # 1 MB = 1024 * 1024 bytes
+
+  # Create Hive User Dir
+  params.HdfsResource(params.hive_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hive_user,
+                      mode=params.hive_hdfs_user_mode
+                      )
+
+  # List of properties that should be excluded from the merged config.
+  # This approach is a compromise that avoids adding a dedicated config
+  # type for hive_server_interactive, or per-component config groups.
+  exclude_list = ['hive.enforce.bucketing',
+                  'hive.enforce.sorting']
+
+  # List of configs to be excluded from hive2 client, but present in Hive2 server.
+  exclude_list_for_hive2_client = ['javax.jdo.option.ConnectionPassword',
+                                   'hadoop.security.credential.provider.path']
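+  # (The server-side copy keeps these; the connection password is expected to be
+  # resolved via the hive-site.jceks credential store handled further below.)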
+
+  # Copy Tarballs in HDFS.
+  if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
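+    # copy_to_hdfs stages the tez_hive2 tarball in HDFS (typically under
+    # /hdp/apps/<version>/) so jobs can localize it; the skip flag honors
+    # sysprepped clusters where tarballs are pre-staged.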
+    resource_created = copy_to_hdfs("tez_hive2",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 skip=params.sysprep_skip_copy_tarballs_hdfs)
+
+    if resource_created:
+      params.HdfsResource(None, action="execute")
+
+  Directory(params.hive_interactive_etc_dir_prefix,
+            mode=0755
+            )
+
+  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  '''
+  Since hive2/hive-site.xml contains only the new and changed properties relative to
+  hive/hive-site.xml, merge hive/hive-site.xml with hive2/hive-site.xml and store the
+  result in hive2/hive-site.xml.
+  '''
+  merged_hive_interactive_site = {}
+  merged_hive_interactive_site.update(params.config['configurations']['hive-site'])
+  merged_hive_interactive_site.update(params.config['configurations']['hive-interactive-site'])
+  for item in exclude_list:
+    if item in merged_hive_interactive_site:
+      del merged_hive_interactive_site[item]
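+  # Illustrative merge semantics with hypothetical values:
+  #   hive-site             = {'hive.tez.container.size': '1024', 'hive.exec.post.hooks': 'h'}
+  #   hive-interactive-site = {'hive.tez.container.size': '4096'}
+  # merges to {'hive.tez.container.size': '4096', 'hive.exec.post.hooks': 'h'},
+  # i.e. the interactive value wins for overlapping keys.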
+
+  '''
+  The 'hive.llap.io.memory.size' value calculated by stack_advisor is currently in MB.
+  We need to convert it to bytes before writing it to the config file.
+  '''
+  if 'hive.llap.io.memory.size' in merged_hive_interactive_site:
+    hive_llap_io_mem_size_in_mb = merged_hive_interactive_site.get("hive.llap.io.memory.size")
+    hive_llap_io_mem_size_in_bytes = long(hive_llap_io_mem_size_in_mb) * MB_TO_BYTES
+    merged_hive_interactive_site['hive.llap.io.memory.size'] = hive_llap_io_mem_size_in_bytes
+    Logger.info("Converted 'hive.llap.io.memory.size' value from '{0} MB' to '{1} Bytes' before writing "
+                "it to config file.".format(hive_llap_io_mem_size_in_mb, hive_llap_io_mem_size_in_bytes))
+
+  '''
+  Hive2 does not support Atlas, so remove the hook 'org.apache.atlas.hive.hook.HiveHook'
+  from config 'hive.exec.post.hooks', where the site merge above may have introduced it
+  when Atlas is installed.
+  '''
+  # Generate atlas-application.properties.xml file
+  if params.enable_atlas_hook and params.stack_supports_atlas_hook_for_hive_interactive:
+    Logger.info("Setup for Atlas Hive2 Hook started.")
+
+    atlas_hook_filepath = os.path.join(params.hive_server_interactive_conf_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
+
+    Logger.info("Setup for Atlas Hive2 Hook done.")
+  else:
+    # Required for HDP 2.5 stacks
+    Logger.info("Skipping setup for Atlas Hook, as it is disabled/ not supported.")
+    remove_atlas_hook_if_exists(merged_hive_interactive_site)
+
+  '''
+  Since tez_hive2/tez-site.xml contains only the new and changed properties relative to
+  tez/tez-site.xml, merge tez/tez-site.xml with tez_hive2/tez-site.xml and store the
+  result in tez_hive2/tez-site.xml.
+  '''
+  merged_tez_interactive_site = {}
+  if 'tez-site' in params.config['configurations']:
+    merged_tez_interactive_site.update(params.config['configurations']['tez-site'])
+    Logger.info("Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'.")
+  else:
+    Logger.error("Tez's 'tez-site' couldn't be retrieved from passed-in configurations.")
+
+  merged_tez_interactive_site.update(params.config['configurations']['tez-interactive-site'])
+  XmlConfig("tez-site.xml",
+            conf_dir = params.tez_interactive_config_dir,
+            configurations = merged_tez_interactive_site,
+            configuration_attributes=params.config['configuration_attributes']['tez-interactive-site'],
+            owner = params.tez_interactive_user,
+            group = params.user_group,
+            mode = 0664)
+
+  '''
+  Merge properties from hiveserver2-interactive-site into hiveserver2-site
+  '''
+  merged_hiveserver2_interactive_site = {}
+  if 'hiveserver2-site' in params.config['configurations']:
+    merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-site'])
+    Logger.info("Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'.")
+  else:
+    Logger.error("'hiveserver2-site' couldn't be retrieved from passed-in configurations.")
+  merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-interactive-site'])
+
+
+  # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
+  #   hive-site.xml
+  #   hive-env.sh
+  #   llap-daemon-log4j2.properties
+  #   llap-cli-log4j2.properties
+  #   hive-log4j2.properties
+  #   hive-exec-log4j2.properties
+  #   beeline-log4j2.properties
+
+  hive2_conf_dirs_list = params.hive_conf_dirs_list
+  hive2_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
+
+  # Make a copy of 'merged_hive_interactive_site' as 'merged_hive_interactive_site_copy' and
+  # delete the client-excluded configs (e.g. 'javax.jdo.option.ConnectionPassword') from the
+  # copy, as the Hive2 client shouldn't carry them.
+  merged_hive_interactive_site_copy = merged_hive_interactive_site.copy()
+  for item in exclude_list_for_hive2_client:
+    if item in merged_hive_interactive_site_copy:
+      del merged_hive_interactive_site_copy[item]
+
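+  # Below, the client conf dir receives world-readable files (0644) while the server
+  # conf dir (conf.server) receives owner-only files (0600), since the server-side
+  # configs can carry sensitive values.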
+  for conf_dir in hive2_conf_dirs_list:
+      mode_identified = 0644 if conf_dir == hive2_client_conf_path else 0600
+      if conf_dir == hive2_client_conf_path:
+        XmlConfig("hive-site.xml",
+                  conf_dir=conf_dir,
+                  configurations=merged_hive_interactive_site_copy,
+                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
+                  owner=params.hive_user,
+                  group=params.user_group,
+                  mode=mode_identified)
+      else:
+        merged_hive_interactive_site = update_credential_provider_path(merged_hive_interactive_site,
+                                                                  'hive-site',
+                                                                  os.path.join(conf_dir, 'hive-site.jceks'),
+                                                                  params.hive_user,
+                                                                  params.user_group
+        )
+        XmlConfig("hive-site.xml",
+                  conf_dir=conf_dir,
+                  configurations=merged_hive_interactive_site,
+                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
+                  owner=params.hive_user,
+                  group=params.user_group,
+                  mode=mode_identified)
+      XmlConfig("hiveserver2-site.xml",
+                conf_dir=conf_dir,
+                configurations=merged_hiveserver2_interactive_site,
+                configuration_attributes=params.config['configuration_attributes']['hiveserver2-interactive-site'],
+                owner=params.hive_user,
+                group=params.user_group,
+                mode=mode_identified)
+
+      hive_server_interactive_conf_dir = conf_dir
+
+      File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
+           owner=params.hive_user,
+           group=params.user_group,
+           mode=mode_identified,
+           content=InlineTemplate(params.hive_interactive_env_sh_template))
+
+      llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
+           mode=mode_identified,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.llap_daemon_log4j))
+
+      llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
+           mode=mode_identified,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.llap_cli_log4j2))
+
+      hive_log4j2_filename = 'hive-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
+           mode=mode_identified,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.hive_log4j2))
+
+      hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
+           mode=mode_identified,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.hive_exec_log4j2))
+
+      beeline_log4j2_filename = 'beeline-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
+           mode=mode_identified,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=InlineTemplate(params.beeline_log4j2))
+
+      File(os.path.join(hive_server_interactive_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           mode=mode_identified,
+           content=Template("hadoop-metrics2-hiveserver2.properties.j2")
+           )
+
+      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           mode=mode_identified,
+           content=Template("hadoop-metrics2-llapdaemon.j2"))
+
+      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           mode=mode_identified,
+           content=Template("hadoop-metrics2-llaptaskscheduler.j2"))
+
+
+  # On some OSes this folder may not exist, so create it before placing files there.
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root')
+
+  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hive.conf.j2"))
+
+  if not os.path.exists(params.target_hive_interactive):
+    jdbc_connector(params.target_hive_interactive, params.hive_intaractive_previous_jdbc_jar)
+
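+  # The DBConnectionVerification jar is fetched from the Ambari server's resources
+  # endpoint (jdk_location typically points there) so the agent can verify DB connectivity.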
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+       mode = 0644)
+  File(params.start_hiveserver2_interactive_path,
+       mode=0755,
+       content=Template(format('{start_hiveserver2_interactive_script}')))
+
+  Directory(params.hive_pid_dir,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_log_dir,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_interactive_var_lib,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+"""
+Remove 'org.apache.atlas.hive.hook.HiveHook' value from Hive2/hive-site.xml config 'hive.exec.post.hooks', if exists.
+"""
+def remove_atlas_hook_if_exists(merged_hive_interactive_site):
+  if 'hive.exec.post.hooks' in merged_hive_interactive_site:
+    existing_hive_exec_post_hooks = merged_hive_interactive_site.get('hive.exec.post.hooks')
+    if existing_hive_exec_post_hooks:
+      hook_splits = existing_hive_exec_post_hooks.split(",")
+      updated_hook_splits = [hook for hook in hook_splits if not hook.strip() == 'org.apache.atlas.hive.hook.HiveHook']
+      updated_hooks_str = ",".join((str(hook)).strip() for hook in updated_hook_splits)
+      if updated_hooks_str != existing_hive_exec_post_hooks:
+        merged_hive_interactive_site['hive.exec.post.hooks'] = updated_hooks_str
+        Logger.info("Updated Hive2/hive-site.xml 'hive.exec.post.hooks' value from : '{0}' to : '{1}'"
+                    .format(existing_hive_exec_post_hooks, updated_hooks_str))
+      else:
+        Logger.info("No change done to Hive2/hive-site.xml 'hive.exec.post.hooks' value.")
+  else:
+    Logger.debug("'hive.exec.post.hooks' doesn't exist in Hive2/hive-site.xml")

http://git-wip-us.apache.org/repos/asf/ambari/blob/80b87400/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..8b69e45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, Directory
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.core.resources.system import File
+
+from hive import create_metastore_schema, hive, jdbc_connector
+from hive_service import hive_service
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+# the legacy conf.server location in previous stack versions
+LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
+
+class HiveMetastore(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # writing configurations on start is required for security
+    self.configure(env)
+    if params.init_metastore_schema:
+      create_metastore_schema() # execute without config lock
+
+    hive_service('metastore', action='start', upgrade_type=upgrade_type)
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hive_service('metastore', action='stop', upgrade_type=upgrade_type)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name = 'metastore')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveMetastoreWindows(HiveMetastore):
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_windows_service_status
+    check_windows_service_status(status_params.hive_metastore_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveMetastoreDefault(HiveMetastore):
+  def get_component_name(self):
+    return "hive-metastore"
+
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_process_status
+    env.set_params(status_params)
+
+    # Check the Hive Metastore process status via its pid file
+    check_process_status(status_params.hive_metastore_pid)
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Metastore Stack Upgrade pre-restart")
+    import params
+
+    env.set_params(params)
+
+    is_upgrade = params.upgrade_direction == Direction.UPGRADE
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hive", params.version)
+      stack_select.select("hive-metastore", params.version)
+
+    if is_upgrade and params.stack_version_formatted_major and \
+            check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
+      self.upgrade_schema(env)
+
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.metastore.kerberos.keytab.file",
+                           "hive.metastore.kerberos.principal"]
+
+      props_read_check = ["hive.metastore.kerberos.keytab.file"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations = {}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
+            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
+                                security_params['hive-site']['hive.metastore.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def upgrade_schema(self, env):
+    """
+    Executes the schema upgrade binary.  This is its own function because it could
+    be called as a standalone task from the upgrade pack, but it is safe to run for
+    each metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.
+
+    The metastore schema upgrade requires a database driver library for most
+    databases. During an upgrade, it's possible that the library is not present,
+    so this will also attempt to copy/download the appropriate driver.
+
+    This function will also ensure that configurations are written out to disk before running
+    since the new configs will most likely not yet exist on an upgrade.
+
+    Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
+    """
+    Logger.info("Upgrading Hive Metastore Schema")
+    import status_params
+    import params
+    env.set_params(params)
+
+    # ensure that configurations are written out before trying to upgrade the schema
+    # since the schematool needs configs and doesn't know how to use the hive conf override
+    self.configure(env)
+
+    if params.security_enabled:
+      cached_kinit_executor(status_params.kinit_path_local,
+        status_params.hive_user,
+        params.hive_metastore_keytab_path,
+        params.hive_metastore_principal,
+        status_params.hostname,
+        status_params.tmp_dir)
+
+    # ensure that the JDBC driver is present for the schema tool; if it's not
+    # present, then download it first
+    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
+      target_directory = format("{stack_root}/{version}/hive/lib")
+
+      # download it if it does not exist
+      if not os.path.exists(params.source_jdbc_file):
+        jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
+
+      target_directory_and_filename = os.path.join(target_directory, os.path.basename(params.source_jdbc_file))
+
+      if params.sqla_db_used:
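+        # SQL Anywhere ships native libraries alongside its JDBC jar, so both the
+        # jars and the native lib64 files are copied into the Hive lib tree.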
+        target_native_libs_directory = format("{target_directory}/native/lib64")
+
+        Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))
+
+        Directory(target_native_libs_directory, create_parents = True)
+
+        Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))
+
+        Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
+      else:
+        # copy the JDBC driver from the older metastore location to the new location only
+        # if it does not already exist
+        if not os.path.exists(target_directory_and_filename):
+          Execute(('cp', params.source_jdbc_file, target_directory),
+            path=["/bin", "/usr/bin/"], sudo = True)
+
+      File(target_directory_and_filename, mode = 0644)
+
+    # build the schema tool command
+    binary = format("{hive_schematool_ver_bin}/schematool")
+
+    # the conf.server directory changed locations between stack versions;
+    # since the configurations have not been written out yet during an upgrade,
+    # we need to fall back to the original legacy location
+    schematool_hive_server_conf_dir = params.hive_server_conf_dir
+    if params.current_version is not None:
+      current_version = format_stack_version(params.current_version)
+      if not(check_stack_feature(StackFeature.CONFIG_VERSIONING, current_version)):
+        schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
+
+    env_dict = {
+      'HIVE_CONF_DIR': schematool_hive_server_conf_dir
+    }
+
+    command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
+    Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
+
+  def get_log_folder(self):
+    import params
+    return params.hive_log_dir
+
+  def get_user(self):
+    import params
+    return params.hive_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.hive_metastore_pid]
+
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

