ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From vbrodets...@apache.org
Subject ambari git commit: AMBARI-19402. dfs.exclude file does not exist when deploying cluster with NN HA + kerberos via BP. (vbrodetskyi)
Date Fri, 06 Jan 2017 15:09:21 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.5 00852dae5 -> 086380c08


AMBARI-19402. dfs.exclude file does not exist when deploying cluster with NN HA + kerberos
via BP. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/086380c0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/086380c0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/086380c0

Branch: refs/heads/branch-2.5
Commit: 086380c08bf34c894e9cf65bdde0d5ee387ea392
Parents: 00852da
Author: Vitaly Brodetskyi <vbrodetskyi@hortonworks.com>
Authored: Fri Jan 6 17:08:05 2017 +0200
Committer: Vitaly Brodetskyi <vbrodetskyi@hortonworks.com>
Committed: Fri Jan 6 17:08:53 2017 +0200

----------------------------------------------------------------------
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 30 ++++++-------
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 44 +++++++++++---------
 2 files changed, 40 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/086380c0/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 23119f0..96160db 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -97,9 +97,6 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
     Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
     setup_ranger_hdfs(upgrade_type=upgrade_type)
     import params
-    if do_format and not params.hdfs_namenode_format_disabled:
-      format_namenode()
-      pass
 
     File(params.exclude_file_path,
          content=Template("exclude_hosts_list.j2"),
@@ -107,6 +104,11 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
          group=params.user_group
     )
 
+    if do_format and not params.hdfs_namenode_format_disabled:
+      format_namenode()
+      pass
+
+
     if params.dfs_ha_enabled and \
       params.dfs_ha_namenode_standby is not None and \
       params.hostname == params.dfs_ha_namenode_standby:
@@ -220,7 +222,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
   elif action == "stop":
     import params
     service(
-      action="stop", name="namenode", 
+      action="stop", name="namenode",
       user=params.hdfs_user
     )
   elif action == "status":
@@ -287,7 +289,7 @@ def create_hdfs_directories():
                        owner=params.smoke_user,
                        mode=params.smoke_hdfs_user_mode,
   )
-  params.HdfsResource(None, 
+  params.HdfsResource(None,
                       action="execute",
   )
 
@@ -354,15 +356,15 @@ def is_namenode_formatted(params):
     if os.path.isdir(mark_dir):
       marked = True
       Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
-    
+
   # Ensure that all mark dirs created for all name directories
   if marked:
     for mark_dir in mark_dirs:
       Directory(mark_dir,
         create_parents = True
-      )      
-    return marked  
-  
+      )
+    return marked
+
   # Move all old format markers to new place
   for old_mark_dir in old_mark_dirs:
     if os.path.isdir(old_mark_dir):
@@ -373,7 +375,7 @@ def is_namenode_formatted(params):
         marked = True
       Directory(old_mark_dir,
         action = "delete"
-      )    
+      )
     elif os.path.isfile(old_mark_dir):
       for mark_dir in mark_dirs:
         Directory(mark_dir,
@@ -383,7 +385,7 @@ def is_namenode_formatted(params):
         action = "delete"
       )
       marked = True
-      
+
   if marked:
     return True
 
@@ -402,7 +404,7 @@ def is_namenode_formatted(params):
     except Fail:
       Logger.info(format("NameNode will not be formatted since {name_dir} exists and contains
content"))
       return True
-       
+
   return False
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -413,13 +415,13 @@ def decommission():
   conf_dir = params.hadoop_conf_dir
   user_group = params.user_group
   nn_kinit_cmd = params.nn_kinit_cmd
-  
+
   File(params.exclude_file_path,
        content=Template("exclude_hosts_list.j2"),
        owner=hdfs_user,
        group=user_group
   )
-  
+
   if not params.update_exclude_file_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/086380c0/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index c186af6..8737645 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -56,6 +56,11 @@ class TestNamenode(RMFTestCase):
                        call_mocks = [(0,"")],
     )
     self.assert_configure_default()
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
     self.assertResourceCalled('Execute', 'hdfs --config /etc/hadoop/conf namenode -format
-nonInteractive',
                               path = ['/usr/bin'],
@@ -64,11 +69,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
                               create_parents = True,
                               )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
-                              owner = 'hdfs',
-                              content = Template('exclude_hosts_list.j2'),
-                              group = 'hadoop',
-                              )
+
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',
@@ -171,6 +172,11 @@ class TestNamenode(RMFTestCase):
                        call_mocks = [(0,"")],
     )
     self.assert_configure_default()
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
     self.assertResourceCalled('Execute', 'hdfs --config /etc/hadoop/conf namenode -format
-nonInteractive',
         path = ['/usr/bin'],
@@ -179,11 +185,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
         create_parents = True,
     )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
-                              owner = 'hdfs',
-                              content = Template('exclude_hosts_list.j2'),
-                              group = 'hadoop',
-                              )
+
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',
@@ -299,6 +301,11 @@ class TestNamenode(RMFTestCase):
                        call_mocks = [(0,"")],
     )
     self.assert_configure_secured()
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
     self.assertResourceCalled('Execute', 'hdfs --config /etc/hadoop/conf namenode -format
-nonInteractive',
         path = ['/usr/bin'],
@@ -307,11 +314,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
         create_parents = True,
     )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
-                              owner = 'hdfs',
-                              content = Template('exclude_hosts_list.j2'),
-                              group = 'hadoop',
-                              )
+
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',
@@ -724,6 +727,11 @@ class TestNamenode(RMFTestCase):
     self.assert_configure_default()
 
     # verify that active namenode was formatted
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',)
     self.assertResourceCalled('Execute', 'hdfs --config /etc/hadoop/conf namenode -format
-nonInteractive',
         path = ['/usr/bin'],
@@ -732,11 +740,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
         create_parents = True,
     )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
-                              owner = 'hdfs',
-                              content = Template('exclude_hosts_list.j2'),
-                              group = 'hadoop',
-                              )
+
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',


Mime
View raw message