hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From aengin...@apache.org
Subject [01/50] [abbrv] hadoop git commit: HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.
Date Thu, 15 Feb 2018 23:50:30 GMT
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 fc84744f7 -> a2ffd9cea


HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is
up. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01bd6ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01bd6ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01bd6ab1

Branch: refs/heads/HDFS-7240
Commit: 01bd6ab18fa48f4c7cac1497905b52e547962599
Parents: 266da25
Author: Brahma Reddy Battula <brahma@apache.org>
Authored: Wed Feb 7 23:10:33 2018 +0530
Committer: Brahma Reddy Battula <brahma@apache.org>
Committed: Wed Feb 7 23:10:33 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +++++---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++++++++++++++++++-
 4 files changed, 602 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 3556086..1d294be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -325,6 +326,7 @@ public class HAUtil {
    */
   public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
       throws IOException {
+    List<IOException> exceptions = new ArrayList<>();
     for (ClientProtocol namenode : namenodes) {
       try {
         namenode.getFileInfo("/");
@@ -334,10 +336,15 @@ public class HAUtil {
         if (cause instanceof StandbyException) {
           // This is expected to happen for a standby NN.
         } else {
-          throw re;
+          exceptions.add(re);
         }
+      } catch (IOException ioe) {
+        exceptions.add(ioe);
       }
     }
+    if(!exceptions.isEmpty()){
+      throw MultipleIOException.createIOException(exceptions);
+    }
     return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ece649d..0c9b875 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4437,7 +4437,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
     String operationName = "setBalancerBandwidth";
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.WRITE);
     checkSuperuserPrivilege(operationName);
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
     logAuditEvent(true, operationName, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1bedd82..023fea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.shell.Command;
@@ -86,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -811,16 +811,26 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
-        if (saved) {
-          System.out.println("Save namespace successful for " +
+        try{
+          boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
+          if (saved) {
+            System.out.println("Save namespace successful for " +
+                proxy.getAddress());
+          } else {
+            System.out.println("No extra checkpoint has been made for "
+                + proxy.getAddress());
+          }
+        }catch (IOException ioe){
+          System.out.println("Save namespace failed for " +
               proxy.getAddress());
-        } else {
-          System.out.println("No extra checkpoint has been made for "
-              + proxy.getAddress());
+          exceptions.add(ioe);
         }
       }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
+      }
     } else {
       boolean saved = dfs.saveNamespace(timeWindow, txGap);
       if (saved) {
@@ -863,10 +873,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        Boolean res = proxy.getProxy().restoreFailedStorage(arg);
-        System.out.println("restoreFailedStorage is set to " + res + " for "
-            + proxy.getAddress());
+        try{
+          Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+          System.out.println("restoreFailedStorage is set to " + res + " for "
+              + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("restoreFailedStorage failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       Boolean res = dfs.restoreFailedStorage(arg);
@@ -896,10 +916,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
-        proxy.getProxy().refreshNodes();
-        System.out.println("Refresh nodes successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().refreshNodes();
+          System.out.println("Refresh nodes successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh nodes failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.refreshNodes();
@@ -944,21 +974,14 @@ public class DFSAdmin extends FsShell {
     EnumSet<OpenFilesType> openFilesTypes = EnumSet.copyOf(types);
 
     DistributedFileSystem dfs = getDFS();
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
     RemoteIterator<OpenFileEntry> openFilesRemoteIterator;
-    if (isHaEnabled) {
-      ProxyAndInfo<ClientProtocol> proxy = NameNodeProxies.createNonHAProxy(
-          dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class,
-          UserGroupInformation.getCurrentUser(), false);
-      openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(),
-          FsTracer.get(dfsConf), openFilesTypes, path);
-    } else {
+    try{
       openFilesRemoteIterator = dfs.listOpenFiles(openFilesTypes, path);
+      printOpenFiles(openFilesRemoteIterator);
+    } catch (IOException ioe){
+      System.out.println("List open files failed.");
+      throw ioe;
     }
-    printOpenFiles(openFilesRemoteIterator);
     return 0;
   }
 
@@ -976,8 +999,7 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
-   * Command to ask the namenode to set the balancer bandwidth for all of the
-   * datanodes.
+   * Command to ask the active namenode to set the balancer bandwidth.
    * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
    * @param argv List of of command line parameters.
    * @param idx The index of the command that is being processed.
@@ -1008,23 +1030,12 @@ public class DFSAdmin extends FsShell {
     }
 
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
-    if (isHaEnabled) {
-      String nsId = dfsUri.getHost();
-      List<ProxyAndInfo<ClientProtocol>> proxies =
-          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
-          nsId, ClientProtocol.class);
-      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().setBalancerBandwidth(bandwidth);
-        System.out.println("Balancer bandwidth is set to " + bandwidth +
-            " for " + proxy.getAddress());
-      }
-    } else {
+    try{
       dfs.setBalancerBandwidth(bandwidth);
       System.out.println("Balancer bandwidth is set to " + bandwidth);
+    } catch (IOException ioe){
+      System.err.println("Balancer bandwidth is set failed.");
+      throw ioe;
     }
     exitCode = 0;
 
@@ -1382,10 +1393,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().finalizeUpgrade();
-        System.out.println("Finalize upgrade successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().finalizeUpgrade();
+          System.out.println("Finalize upgrade successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Finalize upgrade failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.finalizeUpgrade();
@@ -1415,10 +1436,21 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().metaSave(pathname);
-        System.out.println("Created metasave file " + pathname + " in the log "
-            + "directory of namenode " + proxy.getAddress());
+        try{
+          proxy.getProxy().metaSave(pathname);
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress()
+              + " failed");
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.metaSave(pathname);
@@ -1503,10 +1535,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshAuthorizationPolicyProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
-        proxy.getProxy().refreshServiceAcl();
-        System.out.println("Refresh service acl successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshServiceAcl();
+          System.out.println("Refresh service acl successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh service acl failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()) {
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1546,10 +1588,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshUserToGroupsMappings();
-        System.out.println("Refresh user to groups mapping successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshUserToGroupsMappings();
+          System.out.println("Refresh user to groups mapping successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh user to groups mapping failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1591,10 +1643,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshSuperUserGroupsConfiguration();
-        System.out.println("Refresh super user groups configuration " +
-            "successful for " + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshSuperUserGroupsConfiguration();
+          System.out.println("Refresh super user groups configuration " +
+              "successful for " + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh super user groups configuration " +
+              "failed for " + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1630,10 +1692,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshCallQueueProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
-        proxy.getProxy().refreshCallQueue();
-        System.out.println("Refresh call queue successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshCallQueue();
+          System.out.println("Refresh call queue successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh call queue failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 74f5e7a..97daf09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -33,6 +33,7 @@ import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -50,7 +51,7 @@ public class TestDFSAdminWithHA {
   private static String newLine = System.getProperty("line.separator");
 
   private void assertOutputMatches(String string) {
-    String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
+    String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
     String output = new String(out.toByteArray(), Charsets.UTF_8);
 
     if (!errOutput.matches(string) && !output.matches(string)) {
@@ -156,6 +157,60 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testSaveNamespaceNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(1);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace successful for.*" + newLine
+        + "Save namespace failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*" + newLine
+        + "Save namespace successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
@@ -176,6 +231,76 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to true for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to true for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*";
+    // Default is false
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
@@ -185,12 +310,81 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshNodesNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes successful for.*" + newLine
+        + "Refresh nodes failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*" + newLine
+        + "Refresh nodes successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testSetBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
+
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Balancer bandwidth is set to 10 for.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test
+  public void testSetBalancerBandwidthNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set failed." + newLine
+        + ".*" + newLine;
+    assertOutputMatches(message);
   }
 
   @Test (timeout = 30000)
@@ -211,6 +405,44 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testMetaSaveNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
@@ -220,6 +452,40 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1UpNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl successful for.*" + newLine
+        + "Refresh service acl failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Up() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*" + newLine
+        + "Refresh service acl successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+
+  @Test (timeout = 30000)
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
@@ -229,6 +495,43 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping successful for.*"
+        + newLine
+        + "Refresh user to groups mapping failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*"
+        + newLine
+        + "Refresh user to groups mapping successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(
@@ -239,6 +542,49 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration successful for.*"
+        + newLine
+        + "Refresh super user groups configuration failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*"
+        + newLine
+        + "Refresh super user groups configuration successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
@@ -246,4 +592,116 @@ public class TestDFSAdminWithHA {
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue successful for.*" + newLine
+        + "Refresh call queue failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*" + newLine
+        + "Refresh call queue successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgrade() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*Cannot finalize with no NameNode active";
+    assertOutputMatches(message + newLine);
+
+    cluster.getDfsCluster().transitionToActive(0);
+    exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    message = "Finalize upgrade successful for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade successful for .*" + newLine
+        + "Finalize upgrade failed for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade failed for .*" + newLine
+        + "Finalize upgrade successful for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*2 exceptions.*";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1UpNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1DownNN2Up() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test
+  public void testListOpenFilesNN1DownNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*" + newLine + "List open files failed." + newLine;
+    assertOutputMatches(message);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message