ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From alejan...@apache.org
Subject ambari git commit: AMBARI-12113. Cluster deployment is missing tez.tar.gz in HDFS since service responsible for uploading tarball is not co-hosted with Tez Client (alejandro)
Date Thu, 25 Jun 2015 17:12:19 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 d4dfe0d3e -> 0943ed481


AMBARI-12113. Cluster deployment is missing tez.tar.gz in HDFS since service responsible for
uploading tarball is not co-hosted with Tez Client (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0943ed48
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0943ed48
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0943ed48

Branch: refs/heads/branch-2.1
Commit: 0943ed48126d74e6c927c6e29520d7a3f657475a
Parents: d4dfe0d
Author: Alejandro Fernandez <afernandez@hortonworks.com>
Authored: Thu Jun 25 10:11:54 2015 -0700
Committer: Alejandro Fernandez <afernandez@hortonworks.com>
Committed: Thu Jun 25 10:11:54 2015 -0700

----------------------------------------------------------------------
 .../libraries/functions/copy_tarball.py         | 34 ++++++++-----
 .../state/stack/upgrade/ClusterGrouping.java    |  4 ++
 .../server/state/stack/upgrade/ExecuteTask.java |  2 +-
 .../state/stack/upgrade/TaskWrapperBuilder.java | 10 ++++
 .../0.12.0.2.0/package/scripts/params_linux.py  |  3 +-
 .../package/scripts/job_history_server.py       |  3 +-
 .../common-services/TEZ/0.4.0.2.1/metainfo.xml  | 23 +++++++++
 .../0.4.0.2.1/package/scripts/pre_upgrade.py    | 53 ++++++++++++++++++++
 .../common-services/YARN/2.1.0.2.0/metainfo.xml |  7 +++
 .../2.1.0.2.0/package/scripts/historyserver.py  | 18 +++++--
 .../2.1.0.2.0/package/scripts/params_linux.py   |  3 +-
 .../package/scripts/resourcemanager.py          |  9 +---
 .../stacks/HDP/2.1/role_command_order.json      |  2 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |  7 +++
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |  7 +++
 .../stacks/2.0.6/YARN/test_historyserver.py     | 48 +++++++++++++++++-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   | 33 ------------
 17 files changed, 202 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 8eab473..ad4aadc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -95,7 +95,8 @@ def _get_single_version_from_hdp_select():
 
   return hdp_version
 
-def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None,
force_execute=False):
+def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None,
force_execute=False,
+                 use_ru_version_during_ru=True):
   """
   :param name: Tarball name, e.g., tez, hive, pig, sqoop.
   :param user_group: Group to own the directory.
@@ -104,18 +105,20 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   :param custom_source_file: Override the source file path
   :param custom_dest_file: Override the destination file path
   :param force_execute: If true, will execute the HDFS commands immediately, otherwise, will
defer to the calling function.
+  :param use_ru_version_during_ru: If true, will use the version going to during RU. Otherwise,
use the CURRENT (source) version.
   :return: Will return True if successful, otherwise, False.
   """
   import params
 
   if params.stack_name is None or params.stack_name.upper() not in TARBALL_MAP:
     Logger.error("Cannot copy {0} tarball to HDFS because stack {1} does not support this
operation.".format(str(name), str(params.stack_name)))
-    return -1
+    return False
 
   if name is None or name.lower() not in TARBALL_MAP[params.stack_name.upper()]:
     Logger.warning("Cannot copy tarball to HDFS because {0} is not supported in stack {1}
for this operation.".format(str(name), str(params.stack_name)))
-    return -1
+    return False
 
+  Logger.info("Called copy_to_hdfs tarball: {0}".format(name))
   (source_file, dest_file) = TARBALL_MAP[params.stack_name.upper()][name.lower()]
 
   if custom_source_file is not None:
@@ -127,16 +130,22 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   upgrade_direction = default("/commandParams/upgrade_direction", None)
   is_rolling_upgrade = upgrade_direction is not None
   current_version = default("/hostLevelParams/current_version", None)
+  Logger.info("Default version is {0}".format(current_version))
   if is_rolling_upgrade:
-    # This is the version going to. In the case of a downgrade, it is the lower version.
-    current_version = default("/commandParams/version", None)
-  elif current_version is None:
-    # During normal operation, the first installation of services won't yet know about the
version, so must rely
-    # on hdp-select to get it.
-    hdp_version = _get_single_version_from_hdp_select()
-    if hdp_version:
-      Logger.info("Will use stack version {0}".format(hdp_version))
-      current_version = hdp_version
+    if use_ru_version_during_ru:
+      # This is the version going to. In the case of a downgrade, it is the lower version.
+      current_version = default("/commandParams/version", None)
+      Logger.info("Because this is a Rolling Upgrade, will use version {0}".format(current_version))
+    else:
+      Logger.info("This is a Rolling Upgrade, but keep the version unchanged.")
+  else:
+    if current_version is None:
+      # During normal operation, the first installation of services won't yet know about
the version, so must rely
+      # on hdp-select to get it.
+      hdp_version = _get_single_version_from_hdp_select()
+      if hdp_version:
+        Logger.info("Will use stack version {0}".format(hdp_version))
+        current_version = hdp_version
 
   if current_version is None:
     message_suffix = "during rolling %s" % str(upgrade_direction) if is_rolling_upgrade else
""
@@ -145,6 +154,7 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
 
   source_file = source_file.replace(STACK_VERSION_PATTERN, current_version)
   dest_file = dest_file.replace(STACK_VERSION_PATTERN, current_version)
+  Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, dest_file))
 
   if not os.path.exists(source_file):
     Logger.warning("WARNING. Cannot copy {0} tarball because file does not exist: {1} . It
is possible that this component is not installed on this host.".format(str(name), str(source_file)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index c10fffb..80badf4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -207,6 +207,10 @@ public class ClusterGrouping extends Grouping {
         if (null != et.hosts && "master".equals(et.hosts) && null != hosts.master)
{
           realHosts = Collections.singleton(hosts.master);
         }
+        // Pick a random host.
+        if (null != et.hosts && "any".equals(et.hosts) && !hosts.hosts.isEmpty())
{
+          realHosts = Collections.singleton(hosts.hosts.iterator().next());
+        }
 
         return new StageWrapper(
             StageWrapper.Type.RU_TASKS,

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteTask.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteTask.java
index e287f7e..59af413 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteTask.java
@@ -38,7 +38,7 @@ public class ExecuteTask extends Task {
   private Task.Type type = Task.Type.EXECUTE;
 
   /**
-   * Host to run on, if not specified, run on all. Other values include "master"
+   * Host to run on, if not specified, run on all. Other values include "master", "any"
    */
   @XmlAttribute
   public String hosts;

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
index bb0b8d6..719078f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
@@ -57,6 +57,16 @@ public class TaskWrapperBuilder {
             continue;
           }
         }
+        // Pick a random host.
+        if (((ExecuteTask) t).hosts != null && ((ExecuteTask) t).hosts.equalsIgnoreCase("any"))
{
+          if (hostsType.hosts != null && !hostsType.hosts.isEmpty()) {
+            collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.hosts.iterator().next()),
t));
+            continue;
+          } else {
            LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to
run on any host but could not find a host to run on. Skipping this task.", service, component));
+            continue;
+          }
+        }
       }
 
       collection.add(new TaskWrapper(service, component, hostsType.hosts, t));

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 935f3a2..b80ecf4 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -53,7 +53,8 @@ stack_is_hdp21 = Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_s
 # this is not available on INSTALL action because hdp-select is not available
 hdp_stack_version = functions.get_hdp_version('hive-server2')
 
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
 version = default("/commandParams/version", None)
 
 # current host stack version

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
index 4b0bbfa..8cdafc4 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
@@ -80,7 +80,8 @@ class JobHistoryServer(Script):
 
       # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency
on Tez, so it does not
       # need to copy the tarball, otherwise, copy it.
-      if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0:
+
+      if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0')
< 0:
         resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
         if resource_created:
           params.HdfsResource(None, action="execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/metainfo.xml b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/metainfo.xml
index f42af02..8b3a153 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/metainfo.xml
@@ -47,6 +47,29 @@
               <dictionaryName>tez-env</dictionaryName>
             </configFile>
           </configFiles>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
         </component>
       </components>
       <osSpecifics>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
new file mode 100644
index 0000000..7731bc7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.core.exceptions import Fail
+
+from resource_management.core.logger import Logger
+
+class TezPreUpgrade(Script):
+
+  def prepare(self, env):
+    """
+    During the "Upgrade" direction of a Rolling Upgrade, it is necessary to ensure that the
older tez tarball
+    has been copied to HDFS. This is an additional check for added robustness.
+    """
+    import params
+    env.set_params(params)
+
+    Logger.info("Before starting Rolling Upgrade, check if tez tarball has been copied to
HDFS.")
+
+    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.2.0.0')
>= 0:
+      Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to
HDFS.".format(params.hdp_stack_version))
+
+      # Force it to copy the current version of the tez tarball, rather than the version
the RU will go to.
+      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, use_ru_version_during_ru=False)
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+      else:
+        raise Fail("Could not copy tez tarball to HDFS.")
+
+if __name__ == "__main__":
+  TezPreUpgrade().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
index 01c3c26..969c2a7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
@@ -179,6 +179,13 @@
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
           </dependencies>
           <commandScript>
             <script>scripts/historyserver.py</script>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index af37153..16e34d4 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -33,6 +33,7 @@ from resource_management.libraries.functions.security_commons import build_expec
 from resource_management.core.source import Template
 from resource_management.core.logger import Logger
 
+from install_jars import install_tez_jars
 from yarn import yarn
 from service import service
 from ambari_commons import OSConst
@@ -79,18 +80,25 @@ class HistoryServerDefault(HistoryServer):
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0')
>= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-mapreduce-historyserver", params.version)
+      # MC Hammer said, "Can't touch this"
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
+      copy_to_hdfs("tez", params.user_group, params.hdfs_user)
       params.HdfsResource(None, action="execute")
 
-
   def start(self, env, rolling_restart=False):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY
-    
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0')
>= 0:
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
-      params.HdfsResource(None, action="execute")
+
+    if params.hdp_stack_version_major and compare_versions(params.hdp_stack_version_major,
'2.2.0.0') >= 0:
+      # MC Hammer said, "Can't touch this"
+      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
+      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) or resource_created
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+    else:
+      # In HDP 2.1, tez.tar.gz was copied to a different folder in HDFS.
+      install_tez_jars()
 
     service('historyserver', action='start', serviceName='mapreduce')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index d74340f..8fc554b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -58,7 +58,8 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
 hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')
 
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
 version = default("/commandParams/version", None)
 
 hostname = config['hostname']

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index 88a3cba..e67f1ce 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -24,7 +24,6 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations,
\
@@ -113,13 +112,7 @@ class ResourcemanagerDefault(Resourcemanager):
     env.set_params(params)
     self.configure(env) # FOR SECURITY
     if params.has_ranger_admin and params.is_supported_yarn_ranger:
-      setup_ranger_yarn() #Ranger Yarn Plugin related calls 
-    if not Script.is_hdp_stack_greater_or_equal("2.2"):
-      install_tez_jars()
-    else:
-      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
-      if resource_created:
-        params.HdfsResource(None, action="execute")
+      setup_ranger_yarn() #Ranger Yarn Plugin related calls
     service('resourcemanager', action='start')
 
   def status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
index ec38ee2..3adf5e1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json
@@ -11,7 +11,7 @@
     "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
         "DRPC_SERVER-START"],
     "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"],
-    "TEZ_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+    "TEZ_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "HISTORYSERVER-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index d581736..696c7bb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -56,6 +56,13 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball">
+        <task xsi:type="execute" hosts="any">
+          <script>scripts/pre_upgrade.py</script>
+          <function>prepare</function>
+        </task>
+      </execute-stage>
+
     </group>
 
     <group name="ZOOKEEPER" title="ZooKeeper">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 8c8904d..1884f2e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -66,6 +66,13 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball">
+        <task xsi:type="execute" hosts="any"> <!-- hosts="any" -->
+          <script>scripts/pre_upgrade.py</script>
+          <function>prepare</function>
+        </task>
+      </execute-stage>
+
     </group>
 
     <group name="ZOOKEEPER" title="ZooKeeper">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 3457315..33f4956 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -57,6 +57,51 @@ class TestHistoryServer(RMFTestCase):
 
     pid_check_cmd = 'ls /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid
>/dev/null 2>&1 && ps -p `cat /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid`
>/dev/null 2>&1'
 
+    self.assertResourceCalled("HdfsResource", "/apps/tez/",
+                          type="directory",
+                          action=["create_on_execute"],
+                          user=u"hdfs",
+                          owner=u"tez",
+                          mode=493,
+                          hadoop_bin_dir="/usr/bin",
+                          hadoop_conf_dir="/etc/hadoop/conf",
+                          hdfs_site=self.getConfig()["configurations"]["hdfs-site"],
+                          default_fs=u'hdfs://c6401.ambari.apache.org:8020',
+                          security_enabled=False,
+                          kinit_path_local="/usr/bin/kinit",
+                          keytab=UnknownConfigurationMock(),
+                          principal_name=UnknownConfigurationMock()
+                          )
+
+    self.assertResourceCalled("HdfsResource", "/apps/tez/lib/",
+                              type="directory",
+                              action=["create_on_execute"],
+                              user=u'hdfs',
+                              owner=u"tez",
+                              mode=493,
+                              hadoop_bin_dir="/usr/bin",
+                              hadoop_conf_dir="/etc/hadoop/conf",
+                              hdfs_site=self.getConfig()["configurations"]["hdfs-site"],
+                              default_fs=u'hdfs://c6401.ambari.apache.org:8020',
+                              security_enabled=False,
+                              kinit_path_local="/usr/bin/kinit",
+                              keytab=UnknownConfigurationMock(),
+                              principal_name=UnknownConfigurationMock()
+    )
+
+    self.assertResourceCalled("HdfsResource", None,
+                              action=['execute'],
+                              user=u'hdfs',
+                              hadoop_bin_dir="/usr/bin",
+                              hadoop_conf_dir="/etc/hadoop/conf",
+                              hdfs_site=self.getConfig()["configurations"]["hdfs-site"],
+                              default_fs=u'hdfs://c6401.ambari.apache.org:8020',
+                              security_enabled=False,
+                              kinit_path_local="/usr/bin/kinit",
+                              keytab=UnknownConfigurationMock(),
+                              principal_name=UnknownConfigurationMock()
+                          )
+
     self.assertResourceCalled('File', '/var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid',
                               not_if=pid_check_cmd,
                               action=['delete'])
@@ -710,7 +755,8 @@ class TestHistoryServer(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute', 'hdp-select set hadoop-mapreduce-historyserver %s'
% version)
-    copy_to_hdfs_mock.assert_called_with("mapreduce", "hadoop", "hdfs")
+    copy_to_hdfs_mock.assert_called_with("tez", "hadoop", "hdfs")
+
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',

http://git-wip-us.apache.org/repos/asf/ambari/blob/0943ed48/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index 94e26b5..fbde404 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -59,39 +59,6 @@ class TestResourceManager(RMFTestCase):
 
     self.assert_configure_default()
 
-    self.assertResourceCalled('HdfsResource', '/apps/tez/',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
-        keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
-        user = 'hdfs',
-        owner = 'tez',
-        hadoop_conf_dir = '/etc/hadoop/conf',
-        type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
-        mode = 0755,
-    )
-    self.assertResourceCalled('HdfsResource', '/apps/tez/lib/',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
-        keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
-        user = 'hdfs',
-        owner = 'tez',
-        hadoop_conf_dir = '/etc/hadoop/conf',
-        type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
-        mode = 0755,
-    )
-    self.assertResourceCalled('HdfsResource', None,
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
-        keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
-        user = 'hdfs',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'],
principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
-        hadoop_conf_dir = '/etc/hadoop/conf',
-    )
     self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
                               action = ['delete'],
                               not_if = 'ls /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid
>/dev/null 2>&1 && ps -p `cat /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid`
>/dev/null 2>&1',


Mime
View raw message