ambari-commits mailing list archives

From jlun...@apache.org
Subject ambari git commit: AMBARI-14241: RU on non-HDFS filesystems, native commands like hdfs dfsadmin fail (jluniya)
Date Mon, 07 Dec 2015 23:21:47 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.2 893955a59 -> 4d0260231


AMBARI-14241: RU on non-HDFS filesystems, native commands like hdfs dfsadmin fail (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d026023
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d026023
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d026023

Branch: refs/heads/branch-2.2
Commit: 4d02602314f61235220323cbd392b508523b14c2
Parents: 893955a
Author: Jayush Luniya <jluniya@hortonworks.com>
Authored: Mon Dec 7 15:17:20 2015 -0800
Committer: Jayush Luniya <jluniya@hortonworks.com>
Committed: Mon Dec 7 15:21:11 2015 -0800

----------------------------------------------------------------------
 .../package/scripts/datanode_upgrade.py         | 17 +++--
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  8 +-
 .../package/scripts/journalnode_upgrade.py      |  4 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  | 13 ++--
 .../package/scripts/namenode_upgrade.py         | 28 +++----
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     | 18 ++++-
 .../package/scripts/nodemanager_upgrade.py      |  4 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 32 +++++++-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 79 ++++++++++++++++++--
 9 files changed, 164 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
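
Why the change: on clusters where fs.defaultFS is not an HDFS URI (for example a cloud object store), a bare "hdfs dfsadmin ..." invocation runs against the non-HDFS default filesystem and fails, breaking rolling upgrade (RU) steps. The patch therefore routes every dfsadmin call through a new helper, get_dfsadmin_base_command(), which pins the command to the NameNode with an explicit -fs URI. A minimal standalone sketch of that pattern (the parameters here are illustrative stand-ins for the values the real helper reads from the params module):

def get_dfsadmin_base_command(hdfs_binary, namenode_address,
                              namenode_rpc=None, dfs_ha_enabled=False,
                              use_specific_namenode=False):
    # Pin dfsadmin to HDFS explicitly instead of relying on fs.defaultFS,
    # which may not point at HDFS on these clusters.
    if dfs_ha_enabled and use_specific_namenode:
        # Address one concrete NameNode rather than the HA nameservice.
        return "{0} dfsadmin -fs hdfs://{1}".format(hdfs_binary, namenode_rpc)
    return "{0} dfsadmin -fs {1}".format(hdfs_binary, namenode_address)

# e.g. get_dfsadmin_base_command("hdfs", "hdfs://ns1") + " -report -live"
# yields: hdfs dfsadmin -fs hdfs://ns1 -report -live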


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
index 6138f8c..8f36001 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
@@ -24,6 +24,7 @@ from resource_management.core.resources.system import Execute
 from resource_management.core import shell
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.decorator import retry
+from utils import get_dfsadmin_base_command
 
 
 def pre_rolling_upgrade_shutdown(hdfs_binary):
@@ -41,7 +42,8 @@ def pre_rolling_upgrade_shutdown(hdfs_binary):
   if params.security_enabled:
     Execute(params.dn_kinit_cmd, user = params.hdfs_user)
 
-  command = format('{hdfs_binary} dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')
 
   code, output = shell.call(command, user=params.hdfs_user)
   if code == 0:
@@ -93,7 +95,8 @@ def _check_datanode_shutdown(hdfs_binary):
 
   # override stock retry timeouts since after 30 seconds, the datanode is
   # marked as dead and can affect HBase during RU
-  command = format('{hdfs_binary} dfsadmin -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
 
   try:
     Execute(command, user=params.hdfs_user, tries=1)
@@ -109,22 +112,26 @@ def _check_datanode_shutdown(hdfs_binary):
 def _check_datanode_startup(hdfs_binary):
   """
   Checks that a DataNode is reported as being alive via the
-  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
+  "hdfs dfsadmin -fs {namenode_address} -report -live" command. Once the DataNode is found
to be
   alive this method will return, otherwise it will raise a Fail(...) and retry
   automatically.
   :param hdfs_binary: name/path of the HDFS binary to use
   :return:
   """
   import params
+  import socket
 
   try:
-    command = format('{hdfs_binary} dfsadmin -report -live')
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    command = dfsadmin_base_command + ' -report -live'
     return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
   except:
     raise Fail('Unable to determine if the DataNode has started after upgrade.')
 
   if return_code == 0:
-    if params.hostname.lower() in hdfs_output.lower():
+    hostname = params.hostname.lower()
+    hostname_ip =  socket.gethostbyname(params.hostname.lower())
+    if hostname in hdfs_output.lower() or hostname_ip in hdfs_output.lower():
       Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
       return
     else:
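
A note on _check_datanode_startup above: "dfsadmin -report -live" may list a rejoined DataNode by IP address rather than by hostname, so matching on the hostname alone can produce a false negative; the patch adds a socket.gethostbyname() fallback (the same fallback is applied to the YARN NodeManager check later in this commit). A standalone sketch of the comparison, with an illustrative report line:

import socket

def datanode_rejoined(hostname, report_output):
    # Accept either the hostname or its resolved IP in the live report.
    hostname = hostname.lower()
    try:
        hostname_ip = socket.gethostbyname(hostname)
    except socket.error:
        hostname_ip = None
    output = report_output.lower()
    return hostname in output or bool(hostname_ip and hostname_ip in output)

sample_report = "Name: 192.168.64.102:50010 (c6402.ambari.apache.org)"
print(datanode_rejoined("c6402.ambari.apache.org", sample_report))  # True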

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index b93f772..52e098e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -29,6 +29,7 @@ from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
 from resource_management.libraries.functions import Direction
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
+from utils import get_dfsadmin_base_command
 
 if OSCheck.is_windows_family():
   from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
@@ -114,12 +115,11 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, e
     if params.security_enabled:
       Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
               user = params.hdfs_user)
-
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
+    is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
     if params.dfs_ha_enabled:
-      is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs hdfs://{namenode_rpc} -safemode get | grep 'Safe mode is OFF'")
       is_active_namenode_cmd = as_user(format("{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
     else:
-      is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs {namenode_address} -safemode get | grep 'Safe mode is OFF'")
       is_active_namenode_cmd = True
     
     # During NonRolling Upgrade, both NameNodes are initially down,
@@ -402,7 +402,7 @@ def decommission():
     # need to execute each command scoped to a particular namenode
     nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
   else:
-    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -refreshNodes')
+    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs {namenode_address} -refreshNodes')
   Execute(nn_refresh_cmd, user=hdfs_user)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index 850c32d..d598840 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -27,6 +27,7 @@ import utils
 from resource_management.libraries.functions.jmx import get_value_from_jmx
 import namenode_ha_state
 from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
+from utils import get_dfsadmin_base_command
 
 
 def post_upgrade_check():
@@ -81,7 +82,8 @@ def hdfs_roll_edits():
   import params
 
  # TODO, this will need to be doc'ed since existing HDP 2.2 clusters will need HDFS_CLIENT on all JOURNALNODE hosts
-  command = 'hdfs dfsadmin -rollEdits'
+  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+  command = dfsadmin_base_command + ' -rollEdits'
   Execute(command, user=params.hdfs_user, tries=1)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 9800ff1..8684a96 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -50,7 +50,7 @@ import namenode_upgrade
 from hdfs_namenode import namenode
 from hdfs import hdfs
 import hdfs_rebalance
-from utils import initiate_safe_zkfc_failover, get_hdfs_binary
+from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
 
 
 
@@ -192,11 +192,8 @@ class NameNodeDefault(NameNode):
       hdfs_binary = self.get_hdfs_binary()
       # Note, this fails if namenode_address isn't prefixed with "params."
 
-      is_namenode_safe_mode_off = ""
-      if params.dfs_ha_enabled:
-        is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc} -safemode get | grep 'Safe mode is OFF'")
-      else:
-        is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs {params.namenode_address} -safemode get | grep 'Safe mode is OFF'")
+      dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
+      is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
 
       # Wait up to 30 mins
       Execute(is_namenode_safe_mode_off,
@@ -240,7 +237,9 @@ class NameNodeDefault(NameNode):
     env.set_params(params)
 
     hdfs_binary = self.get_hdfs_binary()
-    Execute(format("{hdfs_binary} dfsadmin -report -live"),
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    dfsadmin_cmd = dfsadmin_base_command + " -report -live"
+    Execute(dfsadmin_cmd,
             user=params.hdfs_user,
             tries=60,
             try_sleep=10
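
The post_upgrade_restart check above does not run the report just once; Execute(..., tries=60, try_sleep=10) keeps retrying the command, roughly ten minutes in total, before giving up. A rough standalone equivalent of that retry behavior, using subprocess as a stand-in for Ambari's Execute resource:

import subprocess
import time

def poll_until_success(command, tries=60, try_sleep=10):
    # Re-run the shell command until it exits 0, sleeping between attempts.
    for attempt in range(tries):
        if subprocess.call(command, shell=True) == 0:
            return
        time.sleep(try_sleep)
    raise RuntimeError("Command failed after %d tries: %s" % (tries, command))

# poll_until_success("hdfs dfsadmin -fs hdfs://ns1 -report -live")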

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
index 4873b47..a154b73 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
@@ -27,6 +27,7 @@ from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.functions import Direction, SafeMode
+from utils import get_dfsadmin_base_command
 
 from namenode_ha_state import NamenodeHAState
 
@@ -34,7 +35,6 @@ from namenode_ha_state import NamenodeHAState
 safemode_to_instruction = {SafeMode.ON: "enter",
                            SafeMode.OFF: "leave"}
 
-
 def prepare_upgrade_check_for_previous_dir():
   """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
@@ -71,7 +71,8 @@ def prepare_upgrade_enter_safe_mode(hdfs_binary):
   """
   import params
 
-  safe_mode_enter_cmd = format("{hdfs_binary} dfsadmin -safemode enter")
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
   try:
     # Safe to call if already in Safe Mode
     desired_state = SafeMode.ON
@@ -91,7 +92,8 @@ def prepare_upgrade_save_namespace(hdfs_binary):
   """
   import params
 
-  save_namespace_cmd = format("{hdfs_binary} dfsadmin -saveNamespace")
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  save_namespace_cmd = dfsadmin_base_command + " -saveNamespace"
   try:
     Logger.info("Checkpoint the current namespace.")
     as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
@@ -139,7 +141,8 @@ def prepare_upgrade_finalize_previous_upgrades(hdfs_binary):
   """
   import params
 
-  finalize_command = format("{hdfs_binary} dfsadmin -rollingUpgrade finalize")
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  finalize_command = dfsadmin_base_command + " -rollingUpgrade finalize"
   try:
     Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
                 "This will return 255 if no upgrades are in progress.")
@@ -167,11 +170,8 @@ def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary):
   import params
   original_state = SafeMode.UNKNOWN
 
-  safemode_base_command = ""
-  if params.dfs_ha_enabled:
-    safemode_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}
-safemode ")
-  else:
-    safemode_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}
-safemode ")
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  safemode_base_command = dfsadmin_base_command + " -safemode "
   safemode_check_cmd = safemode_base_command + " get"
 
   grep_pattern = format("Safe mode is {safemode_state}")
@@ -233,8 +233,9 @@ def prepare_rolling_upgrade(hdfs_binary):
       if not safemode_transition_successful:
         raise Fail("Could not transition to safemode state %s. Please check logs to make
sure namenode is up." % str(desired_state))
 
-    prepare = format("{hdfs_binary} dfsadmin -rollingUpgrade prepare")
-    query = format("{hdfs_binary} dfsadmin -rollingUpgrade query")
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    prepare = dfsadmin_base_command + " -rollingUpgrade prepare"
+    query = dfsadmin_base_command + " -rollingUpgrade query"
     Execute(prepare,
             user=params.hdfs_user,
             logoutput=True)
@@ -255,8 +256,9 @@ def finalize_upgrade(upgrade_type, hdfs_binary):
     kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")

     Execute(kinit_command, user=params.hdfs_user, logoutput=True)
 
-  finalize_cmd = format("{hdfs_binary} dfsadmin -rollingUpgrade finalize")
-  query_cmd = format("{hdfs_binary} dfsadmin -rollingUpgrade query")
+  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+  finalize_cmd = dfsadmin_base_command + " -rollingUpgrade finalize"
+  query_cmd = dfsadmin_base_command + " -rollingUpgrade query"
 
   Execute(query_cmd,
         user=params.hdfs_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index 2c42e46..aa97af0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -357,4 +357,20 @@ def get_hdfs_binary(distro_component_name):
     if Script.is_hdp_stack_greater_or_equal("2.2"):
       hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
 
-  return hdfs_binary
\ No newline at end of file
+  return hdfs_binary
+
+def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
+  """
+  Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
+  :param hdfs_binary: path to hdfs binary to use
+  :param use_specific_namenode: flag if set and Namenode HA is enabled, then the dfsadmin command will use
+  current namenode's address
+  :return: the constructed dfsadmin base command
+  """
+  import params
+  dfsadmin_base_command = ""
+  if params.dfs_ha_enabled and use_specific_namenode:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
+  else:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
+  return dfsadmin_base_command
\ No newline at end of file
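
With the helper in place, call sites only append a subcommand to the shared base. The strings below mirror the command lines the updated tests assert (ns1 is the HA nameservice id from the test fixtures; the variable names follow the patched scripts):

dfsadmin_base_command = "hdfs dfsadmin -fs hdfs://ns1"
safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
finalize_cmd = dfsadmin_base_command + " -rollingUpgrade finalize"
print(safe_mode_enter_cmd)  # hdfs dfsadmin -fs hdfs://ns1 -safemode enter
print(finalize_cmd)         # hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade finalize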

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
index 01f8349..65709ac 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
@@ -51,6 +51,7 @@ def _check_nodemanager_startup():
   :return:
   '''
   import params
+  import socket
 
   command = 'yarn node -list -states=RUNNING'
 
@@ -62,10 +63,11 @@ def _check_nodemanager_startup():
 
   if return_code == 0:
     hostname = params.hostname.lower()
+    hostname_ip = socket.gethostbyname(params.hostname.lower())
     nodemanager_address = params.nm_address.lower()
     yarn_output = yarn_output.lower()
 
-    if hostname in yarn_output or nodemanager_address in yarn_output:
+    if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
       Logger.info('NodeManager with ID {0} has rejoined the cluster.'.format(nodemanager_address))
       return
     else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 5e33564..c6f8c1d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -579,7 +579,37 @@ class TestDatanode(RMFTestCase):
       if str(err.message) != expected_message:
         self.fail("Expected this exception to be thrown. " + expected_message + ". Got this
instead, " + str(err.message))
 
-    self.assertResourceCalled("Execute", "hdfs dfsadmin -D ipc.client.connect.max.retries=5
-D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
+    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020
-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo
0.0.0.0:8010", tries=1, user="hdfs")
+
+  @patch("resource_management.core.shell.call")
+  @patch('time.sleep')
+  def test_stop_during_upgrade(self, time_mock, call_mock):
+    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_default.json"
+    call_mock_side_effects = [(0, ""), ]
+    call_mock.side_effects = call_mock_side_effects
+    with open(config_file, "r") as f:
+      json_content = json.load(f)
+
+    version = '2.2.1.0-3242'
+    json_content['commandParams']['version'] = version
+
+    try:
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                         classname = "DataNode",
+                         command = "stop",
+                         config_dict = json_content,
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES,
+                         call_mocks = call_mock_side_effects,
+                         command_args=["rolling"])
+
+      raise Fail("Expected a fail since datanode didn't report a shutdown")
+    except Exception, err:
+      expected_message = "DataNode has not shutdown."
+      if str(err.message) != expected_message:
+        self.fail("Expected this exception to be thrown. " + expected_message + ". Got this
instead, " + str(err.message))
+
+    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5
-D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
 
   @patch("resource_management.libraries.functions.security_commons.build_expectations")
   @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d026023/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 815a1ab..47e35de 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1442,13 +1442,30 @@ class TestNamenode(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -report -live',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -report -live',
                               user='hdfs',
                               tries=60,
                               try_sleep=10
                               )
     self.assertNoMoreResources()
 
+  def test_post_upgrade_ha_restart(self):
+    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_default.json"
+    with open(config_file, "r") as f:
+      json_content = json.load(f)
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "post_upgrade_restart",
+                       config_dict = json_content,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -report -live',
+                              user='hdfs',
+                              tries=60,
+                              try_sleep=10
+    )
+    self.assertNoMoreResources()
+
   def test_prepare_rolling_upgrade__upgrade(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
@@ -1466,13 +1483,38 @@ class TestNamenode(RMFTestCase):
    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
       logoutput = True, user = 'hdfs')
     
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -rollingUpgrade prepare',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade prepare',
       logoutput = True, user = 'hdfs')
 
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -rollingUpgrade query',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade query',
       logoutput = True, user = 'hdfs')
     
     self.assertNoMoreResources()
+
+  def test_prepare_rolling_upgrade__upgrade(self):
+    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_secured.json"
+    with open(config_file, "r") as f:
+      json_content = json.load(f)
+    json_content['commandParams']['upgrade_direction'] = 'upgrade'
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "prepare_rolling_upgrade",
+                       config_dict = json_content,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES,
+                       call_mocks = [(0, "Safe mode is OFF in c6401.ambari.apache.org")])
+
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
+                              logoutput = True, user = 'hdfs')
+
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade prepare',
+                              logoutput = True, user = 'hdfs')
+
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade query',
+                              logoutput = True, user = 'hdfs')
+
+    self.assertNoMoreResources()
   
 
 
@@ -1511,15 +1553,40 @@ class TestNamenode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -rollingUpgrade query',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade query',
+                              logoutput = True,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade finalize',
+                              logoutput = True,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade query',
+                              logoutput = True,
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_finalize_ha_rolling_upgrade(self):
+    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_default.json"
+    with open(config_file, "r") as f:
+      json_content = json.load(f)
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "finalize_rolling_upgrade",
+                       config_dict = json_content,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
+
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade query',
                               logoutput = True,
                               user = 'hdfs',
                               )
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -rollingUpgrade finalize',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade finalize',
                               logoutput = True,
                               user = 'hdfs',
                               )
-    self.assertResourceCalled('Execute', 'hdfs dfsadmin -rollingUpgrade query',
+    self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade query',
                               logoutput = True,
                               user = 'hdfs',
                               )

