ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dmitriu...@apache.org
Subject ambari git commit: AMBARI-9461 Issue with namenode format (dlysnichenko)
Date Tue, 03 Feb 2015 21:08:29 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 373ff1137 -> d128a0dc6


AMBARI-9461 Issue with namenode format (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d128a0dc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d128a0dc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d128a0dc

Branch: refs/heads/trunk
Commit: d128a0dc6f3298f613435762aecda5cce606ce4b
Parents: 373ff11
Author: Lisnichenko Dmitro <dlysnichenko@hortonworks.com>
Authored: Tue Feb 3 23:06:40 2015 +0200
Committer: Lisnichenko Dmitro <dlysnichenko@hortonworks.com>
Committed: Tue Feb 3 23:07:36 2015 +0200

----------------------------------------------------------------------
 .../2.1.0.2.0/package/files/checkForFormat.sh   |  71 -------------
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 100 ++++++++++++++-----
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |  12 ++-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  32 ++----
 4 files changed, 97 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d128a0dc/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
deleted file mode 100644
index 54405f6..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export old_mark_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  sudo rm -f ${mark_file}
-  sudo mkdir -p ${mark_dir}
-fi
-
-if [[ -d $old_mark_dir ]] ; then
-  mv ${old_mark_dir} ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/d128a0dc/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 143abd8..9581d76 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -16,6 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os.path
 
 from resource_management import *
 from resource_management.core.exceptions import ComponentIsNotRunning
@@ -152,8 +153,8 @@ def create_hdfs_directories(check):
 def format_namenode(force=None):
   import params
 
-  old_mark_dir = params.namenode_formatted_old_mark_dir
-  mark_dir = params.namenode_formatted_mark_dir
+  old_mark_dir = params.namenode_formatted_old_mark_dirs
+  mark_dir = params.namenode_formatted_mark_dirs
   dfs_name_dir = params.dfs_name_dir
   hdfs_user = params.hdfs_user
   hadoop_conf_dir = params.hadoop_conf_dir
@@ -165,38 +166,89 @@ def format_namenode(force=None):
                     bin_dir=params.hadoop_bin_dir,
                     conf_dir=hadoop_conf_dir)
     else:
-      File(format("{tmp_dir}/checkForFormat.sh"),
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
-        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
-              not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )
-    
-      Directory(mark_dir,
-        recursive = True
-      )
+      if not is_namenode_formatted(params):
+        Execute(format(
+          'sudo su {hdfs_user} - -s /bin/bash -c "export PATH=$PATH:{hadoop_bin_dir} ; yes Y | hdfs --config {hadoop_conf_dir} namenode -format"'),
+                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin",
+        )
+        for m_dir in mark_dir:
+          Directory(m_dir,
+            recursive = True
+          )
   else:
     if params.dfs_ha_namenode_active is not None:
       if params.hostname == params.dfs_ha_namenode_active:
         # check and run the format command in the HA deployment scenario
         # only format the "active" namenode in an HA deployment
-        File(format("{tmp_dir}/checkForFormat.sh"),
-             content=StaticFile("checkForFormat.sh"),
-             mode=0755)
         Execute(format(
-          "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
-          "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
-                not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
-                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+          'sudo su {hdfs_user} - -s /bin/bash -c "export PATH=$PATH:{hadoop_bin_dir} ; yes Y | hdfs --config {hadoop_conf_dir} namenode -format"'),
+                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin",
         )
-        Directory(mark_dir,
-                  recursive=True
+        for m_dir in mark_dir:
+          Directory(m_dir,
+            recursive = True
         )
 
 
+def is_namenode_formatted(params):
+  old_mark_dirs = params.namenode_formatted_old_mark_dirs
+  mark_dirs = params.namenode_formatted_mark_dirs
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  marked = False
+  # Check if name directories have been marked as formatted
+  for mark_dir in mark_dirs:
+    if os.path.isdir(mark_dir):
+      marked = True
+      print format("{mark_dir} exists. Namenode DFS already formatted")
+    
+  # Ensure that all mark dirs created for all name directories
+  if marked:
+    for mark_dir in mark_dirs:
+      Directory(mark_dir,
+        recursive = True
+      )      
+    return marked  
+  
+  # Move all old format markers to new place
+  for old_mark_dir in old_mark_dirs:
+    if os.path.isdir(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Execute(format(
+          "sudo cp -ar {old_mark_dir} {mark_dir}"),
+                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+        )
+        marked = True
+      Execute(format(
+        "sudo rm -rf {old_mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+      )           
+    elif os.path.isfile(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Execute(format(
+          "sudo mkdir -p ${mark_dir}"),
+                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+        )
+      Execute(format(
+        "sudo rm -f {old_mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+      )  
+      marked = True
+      
+  # Check if name dirs are not empty
+  for name_dir in nn_name_dirs:
+    try:
+      Execute(format(
+        "sudo ls {name_dir} | wc -l  | grep -q ^0$"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+      )
+      marked = False
+    except Exception:
+      marked = True
+      print format("ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs {nn_name_dirs}")
+      break
+       
+  return marked    
+      
 def decommission():
   import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d128a0dc/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index 8f7962b..ef0991e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -173,8 +173,16 @@ namenode_dirs_stub_filename = "namenode_dirs_created"
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770
 
-namenode_formatted_old_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
+
+hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
+namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted", 
+  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
+  "/var/lib/hdfs/namenode/formatted"]
+dfs_name_dirs = dfs_name_dir.split(",")
+namenode_formatted_mark_dirs = []
+for dn_dir in dfs_name_dirs:
+ tmp_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
+ namenode_formatted_mark_dirs.append(tmp_mark_dir)
 
 fs_checkpoint_dirs = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'].split(',')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d128a0dc/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 441e697..c60e8a0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -51,15 +51,13 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
-    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
-                              content = StaticFile('checkForFormat.sh'),
-                              mode = 0755,
+    self.assertResourceCalled('Execute', 'sudo ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               )
-    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               )
-    self.assertResourceCalled('Directory', '/var/lib/hdfs/namenode/formatted/',
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
                               recursive = True,
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -88,7 +86,6 @@ class TestNamenode(RMFTestCase):
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
     )
-    self.printResources()
    self.assertResourceCalled('Execute', 'hdfs --config /etc/hadoop/conf dfsadmin -safemode leave',
         path = ['/usr/bin'],
         user = 'hdfs',
@@ -175,16 +172,14 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
-    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
-                              content = StaticFile('checkForFormat.sh'),
-                              mode = 0755,
+    self.assertResourceCalled('Execute', 'sudo ls /hadoop/hdfs/namenode | wc -l  | grep -q ^0$',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               )
-    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               )
-    self.assertResourceCalled('Directory', '/var/lib/hdfs/namenode/formatted/',
-                              recursive = True
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
+                              recursive = True,
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
@@ -449,15 +444,10 @@ class TestNamenode(RMFTestCase):
     self.assert_configure_default()
 
     # verify that active namenode was formatted
-    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
-                              content = StaticFile('checkForFormat.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', 'sudo su hdfs - -s /bin/bash -c "export PATH=$PATH:/usr/bin ; yes Y | hdfs --config /etc/hadoop/conf namenode -format"',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               )
-    self.assertResourceCalled('Directory', '/var/lib/hdfs/namenode/formatted/',
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode/namenode-formatted/',
                               recursive = True,
                               )
 


Mime
View raw message