hadoop-common-commits mailing list archives

From yjzhan...@apache.org
Subject hadoop git commit: HDFS-9249. NPE is thrown if an IOException is thrown in NameNode constructor. (Wei-Chiu Chuang via Yongjun Zhang)
Date Mon, 09 Nov 2015 22:17:49 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a4ff03b20 -> 6bca31715


HDFS-9249. NPE is thrown if an IOException is thrown in NameNode constructor. (Wei-Chiu Chuang
via Yongjun Zhang)

(cherry picked from commit 2741a2109b98d0febb463cb318018ecbd3995102)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bca3171
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bca3171
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bca3171

Branch: refs/heads/branch-2
Commit: 6bca317157ddd7a2bbdb3b05d8dceda7d20c9a4b
Parents: a4ff03b
Author: Yongjun Zhang <yzhang@cloudera.com>
Authored: Mon Nov 9 14:04:03 2015 -0800
Committer: Yongjun Zhang <yzhang@cloudera.com>
Committed: Mon Nov 9 14:12:20 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop/hdfs/server/namenode/BackupNode.java |  4 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   | 13 +++-
 .../hdfs/server/namenode/TestBackupNode.java    | 71 ++++++++++++++++++++
 4 files changed, 88 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
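
For readers skimming the patch, the chain of events being fixed is: the BackupNode constructor throws an IOException (in the regression test below, a Kerberos login failure caused by an empty keytab setting), the NameNode constructor's catch block calls stop() to clean up, and stop() dereferences state that was never initialized, so the caller sees a NullPointerException instead of the original IOException. A minimal standalone sketch of that masking problem, using illustrative names rather than the actual Hadoop classes:

    import java.io.IOException;

    // Illustrative sketch only -- not the Hadoop classes. Construction fails
    // early, cleanup assumes fully initialized state, and the NPE thrown by
    // cleanup replaces the IOException that actually caused the failure.
    class PartialInitExample {
      private Object editLog;          // never assigned: init fails first

      PartialInitExample() throws IOException {
        try {
          throw new IOException("login failure for invalid keytab");
        } catch (IOException e) {
          stop();                      // unguarded cleanup throws NPE here
          throw e;                     // never reached, so the IOException is lost
        }
      }

      void stop() {
        editLog.toString();            // NullPointerException: editLog is null
      }
    }

The patch addresses this in two places: BackupNode.stop() now skips edit-log cleanup when the namesystem was never created, and NameNode wraps the cleanup call in stopAtException() so any secondary failure is logged rather than allowed to mask the original exception.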


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bca3171/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a366428..c3ea7a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1425,6 +1425,9 @@ Release 2.8.0 - UNRELEASED
     initialization, because HftpFileSystem is missing.
     (Mingliang Liu via cnauroth)
 
+    HDFS-9249. NPE is thrown if an IOException is thrown in NameNode constructor.
+    (Wei-Chiu Chuang via Yongjun Zhang)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bca3171/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 5da1f01..3933c66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -209,7 +209,9 @@ public class BackupNode extends NameNode {
 
     // Abort current log segment - otherwise the NN shutdown code
     // will close it gracefully, which is incorrect.
-    getFSImage().getEditLog().abortCurrentLogSegment();
+    if (namesystem != null) {
+      getFSImage().getEditLog().abortCurrentLogSegment();
+    }
 
     // Stop name-node threads
     super.stop();
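
The null check above is needed because namesystem is only assigned once NameNode initialization succeeds; when the BackupNode constructor fails before that point, getFSImage() delegates to the still-null namesystem and throws. A hedged reconstruction of the relevant call chain, with simplified stand-in types rather than the real Hadoop classes:

    // Simplified stand-ins for illustration only.
    class EditLogStub {
      void abortCurrentLogSegment() { /* no-op in this sketch */ }
    }

    class FSImageStub {
      EditLogStub getEditLog() { return new EditLogStub(); }
    }

    class FSNamesystemStub {
      FSImageStub getFSImage() { return new FSImageStub(); }
    }

    class BackupNodeStub {
      FSNamesystemStub namesystem;      // still null if construction failed early

      FSImageStub getFSImage() {
        return namesystem.getFSImage(); // NPE when namesystem == null
      }

      void stop() {
        if (namesystem != null) {       // the guard this hunk adds
          getFSImage().getEditLog().abortCurrentLogSegment();
        }
      }
    }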

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bca3171/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6102bdc..5b86eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -891,15 +891,24 @@ public class NameNode implements NameNodeStatusMXBean {
         haContext.writeUnlock();
       }
     } catch (IOException e) {
-      this.stop();
+      this.stopAtException(e);
       throw e;
     } catch (HadoopIllegalArgumentException e) {
-      this.stop();
+      this.stopAtException(e);
       throw e;
     }
     this.started.set(true);
   }
 
+  private void stopAtException(Exception e) {
+    try {
+      this.stop();
+    } catch (Exception ex) {
+      LOG.warn("Encountered exception when handling exception ("
+          + e.getMessage() + "):", ex);
+    }
+  }
+
   protected HAState createHAState(StartupOption startOpt) {
     if (!haEnabled || startOpt == StartupOption.UPGRADE 
         || startOpt == StartupOption.UPGRADEONLY) {
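
stopAtException() covers the second half of the problem: even with the BackupNode guard, anything that goes wrong while stopping a half-constructed NameNode would replace the IOException or HadoopIllegalArgumentException being handled, hiding the real cause of the startup failure. Wrapping the cleanup in its own try/catch keeps the original exception as the one that propagates; a sketch of the same idiom in isolation, with illustrative names:

    import java.io.IOException;

    // Generic form of the idiom above, illustrative names only: cleanup
    // triggered from an exception handler is wrapped so a secondary failure
    // is logged instead of thrown, and the original exception still reaches
    // the caller.
    class CleanupOnFailure {
      void start() throws IOException {
        try {
          initialize();               // may fail partway through startup
        } catch (IOException e) {
          stopQuietly(e);             // analogous to stopAtException(e)
          throw e;                    // the original cause propagates
        }
      }

      private void stopQuietly(Exception cause) {
        try {
          stop();
        } catch (Exception ex) {
          System.err.println("Exception while handling ("
              + cause.getMessage() + "): " + ex);
        }
      }

      void initialize() throws IOException {
        throw new IOException("simulated startup failure");
      }

      void stop() {
        throw new IllegalStateException("simulated cleanup failure");
      }
    }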

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bca3171/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index bdc2b28..9f31857 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -30,7 +31,9 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.directory.api.ldap.aci.UserClass;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -46,6 +49,8 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
@@ -124,6 +129,72 @@ public class TestBackupNode {
         Collections.singletonList((int)thisCheckpointTxId));
   }
 
+
+  /**
+   *  Regression test for HDFS-9249.
+   *  This test configures the primary name node with SIMPLE authentication,
+   *  and configures the backup node with Kerberos authentication with
+   *  invalid keytab settings.
+   *
+   *  This configuration causes the backup node to throw an NPE trying to abort
+   *  the edit log.
+   */
+  @Test
+  public void startBackupNodeWithIncorrectAuthentication() throws IOException {
+    Configuration c = new HdfsConfiguration();
+    StartupOption startupOpt = StartupOption.CHECKPOINT;
+    String dirs = getBackupNodeDir(startupOpt, 1);
+    c.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:1234");
+    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
+    c.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
+    c.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
+        -1); // disable block scanner
+    c.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
+    c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
+    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
+    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+        "127.0.0.1:0");
+    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+        "127.0.0.1:0");
+
+    NameNode nn;
+    try {
+      Configuration nnconf = new HdfsConfiguration(c);
+      DFSTestUtil.formatNameNode(nnconf);
+      nn = NameNode.createNameNode(new String[] {}, nnconf);
+    } catch (IOException e) {
+      LOG.info("IOException is thrown creating name node");
+      throw e;
+    }
+
+    c.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+    c.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, "");
+
+    BackupNode bn = null;
+    try {
+      bn = (BackupNode)NameNode.createNameNode(
+          new String[] {startupOpt.getName()}, c);
+      assertTrue("Namesystem in BackupNode should be null",
+          bn.getNamesystem() == null);
+      fail("Incorrect authentication setting should throw IOException");
+    } catch (IOException e) {
+      LOG.info("IOException thrown as expected", e);
+    } finally {
+      if (nn != null) {
+        nn.stop();
+      }
+      if (bn != null) {
+        bn.stop();
+      }
+      SecurityUtil.setAuthenticationMethod(
+          UserGroupInformation.AuthenticationMethod.SIMPLE, c);
+      // reset security authentication
+      UserGroupInformation.setConfiguration(c);
+    }
+  }
+
   @Test
   public void testCheckpointNode() throws Exception {
     testCheckpoint(StartupOption.CHECKPOINT);
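
The new regression test starts a SIMPLE-auth primary NameNode, then forces the BackupNode constructor to fail by switching the configuration to Kerberos with an empty keytab; with the fix, the failure surfaces as the expected IOException rather than a NullPointerException, and the finally block resets authentication to SIMPLE so later tests in the class are unaffected. To run just this test locally, a standard Maven Surefire selection such as the following (not part of the patch) should work from hadoop-hdfs-project/hadoop-hdfs:

    mvn test -Dtest=TestBackupNode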

