hadoop-common-commits mailing list archives

From varunsax...@apache.org
Subject [22/50] [abbrv] hadoop git commit: HDFS-10566. Submit plan request should throw exception if Datanode is in non-REGULAR status. Contributed by Xiaobing Zhou.
Date Sun, 06 Nov 2016 09:58:08 GMT
HDFS-10566. Submit plan request should throw exception if Datanode is in non-REGULAR status.
Contributed by Xiaobing Zhou.
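
In effect, once a datanode has been started with a non-REGULAR option (for
example -rollback, or while an upgrade is in progress), a plan submission such
as "hdfs diskbalancer -execute <planfile>" now fails fast with a
DiskBalancerException ("Datanode is in special state ... Disk balancing not
permitted.") instead of attempting to move blocks. The test added below
exercises exactly this path.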


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/730cb0cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/730cb0cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/730cb0cf

Branch: refs/heads/YARN-5355
Commit: 730cb0cff6a6e2f1a6eef3593568e8a1b5172cf7
Parents: b592061
Author: Anu Engineer <aengineer@apache.org>
Authored: Wed Nov 2 17:46:56 2016 -0700
Committer: Anu Engineer <aengineer@apache.org>
Committed: Wed Nov 2 17:46:56 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java   |   6 ++
 .../diskbalancer/DiskBalancerException.java     |   1 +
 .../diskbalancer/DiskBalancerTestUtil.java      |  19 ++++
 .../command/TestDiskBalancerCommand.java        | 103 ++++++++++++++-----
 4 files changed, 105 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/730cb0cf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f89d38c..de9e48e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3414,6 +3414,12 @@ public class DataNode extends ReconfigurableBase
       String planFile, String planData, boolean skipDateCheck)
       throws IOException {
     checkSuperuserPrivilege();
+    if (getStartupOption(getConf()) != StartupOption.REGULAR) {
+      throw new DiskBalancerException(
+          "Datanode is in special state, e.g. Upgrade/Rollback etc."
+              + " Disk balancing not permitted.",
+          DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
+    }
     // TODO : Support force option
     this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
             skipDateCheck);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730cb0cf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 95ff722..a730a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -40,6 +40,7 @@ public class DiskBalancerException extends IOException {
     NO_SUCH_PLAN,
     UNKNOWN_KEY,
     INVALID_NODE,
+    DATANODE_STATUS_NOT_REGULAR,
   }
 
   private final Result result;
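
With the new enum value, a caller can distinguish this failure mode from other
disk-balancer errors. A minimal sketch, assuming DiskBalancerException exposes
its Result through a getResult() accessor:

    try {
      dn.submitDiskBalancerPlan(planID, planVersion, planFile, planData,
          false /* skipDateCheck */);
    } catch (DiskBalancerException e) {
      // DATANODE_STATUS_NOT_REGULAR is transient: the request can be retried
      // once the datanode finishes its upgrade/rollback and is REGULAR again.
      if (e.getResult()
          == DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR) {
        scheduleRetry(); // hypothetical retry hook, not part of this commit
      }
    }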

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730cb0cf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index c60fe21..bc4181b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -262,6 +263,23 @@ public class DiskBalancerTestUtil {
       final int defaultBlockSize,
       final int fileLen)
       throws IOException, InterruptedException, TimeoutException {
+    return newImbalancedCluster(
+      conf,
+      numDatanodes,
+      storageCapacities,
+      defaultBlockSize,
+      fileLen,
+      null);
+  }
+
+  public static MiniDFSCluster newImbalancedCluster(
+      final Configuration conf,
+      final int numDatanodes,
+      final long[] storageCapacities,
+      final int defaultBlockSize,
+      final int fileLen,
+      final StartupOption dnOption)
+      throws IOException, InterruptedException, TimeoutException {
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
@@ -281,6 +299,7 @@ public class DiskBalancerTestUtil {
         .storageCapacities(storageCapacities)
         .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
         .storagesPerDatanode(2)
+        .dnStartupOption(dnOption)
         .build();
     FsVolumeImpl source = null;
     FsVolumeImpl dest = null;
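
The new overload lets a test bring up an imbalanced mini-cluster whose
datanodes start in an arbitrary state; passing null preserves the old
behavior. A hedged usage sketch (capacities, block size, and file length are
illustrative values, not taken from this commit):

    // Datanode comes up in ROLLBACK state, so disk balancing requests
    // against it should be rejected.
    MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
        new HdfsConfiguration(), 1, new long[] {300 * 1024L, 300 * 1024L},
        100, 50, StartupOption.ROLLBACK);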

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730cb0cf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 0f65f25..ad16cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Tool;
@@ -104,6 +106,50 @@ public class TestDiskBalancerCommand {
   }
 
   /**
+   * Tests that submitting and executing a plan is rejected when the Datanode
+   * is in a status other than REGULAR.
+   */
+  @Test(timeout = 60000)
+  public void testSubmitPlanInNonRegularStatus() throws Exception {
+    final int numDatanodes = 1;
+    MiniDFSCluster miniCluster = null;
+    final Configuration hdfsConf = new HdfsConfiguration();
+
+    try {
+      /* new cluster with imbalanced capacity */
+      miniCluster = DiskBalancerTestUtil.newImbalancedCluster(
+          hdfsConf,
+          numDatanodes,
+          CAPACITIES,
+          DEFAULT_BLOCK_SIZE,
+          FILE_LEN,
+          StartupOption.ROLLBACK);
+
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
+
+      try {
+        /* run execute command */
+        final String cmdLine = String.format(
+            "hdfs diskbalancer -%s %s",
+            EXECUTE,
+            planFileFullName);
+        runCommand(cmdLine, hdfsConf, miniCluster);
+      } catch(RemoteException e) {
+        assertThat(e.getClassName(), containsString("DiskBalancerException"));
+        assertThat(e.toString(),
+            is(allOf(
+                containsString("Datanode is in special state"),
+                containsString("Disk balancing not permitted."))));
+      }
+    } finally {
+      if (miniCluster != null) {
+        miniCluster.shutdown();
+      }
+    }
+  }
+
+  /**
   * Tests running multiple commands under one setup. This mainly covers
    * {@link org.apache.hadoop.hdfs.server.diskbalancer.command.Command#close}
    */
@@ -122,36 +168,16 @@ public class TestDiskBalancerCommand {
           CAPACITIES,
           DEFAULT_BLOCK_SIZE,
           FILE_LEN);
-      String cmdLine = "";
-      List<String> outputs = null;
-      final DataNode dn = miniCluster.getDataNodes().get(0);
-
-      /* run plan command */
-      cmdLine = String.format(
-          "hdfs diskbalancer -%s %s",
-          PLAN,
-          dn.getDatanodeUuid());
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
 
-      /* get path of plan file*/
-      final String planFileName = dn.getDatanodeUuid();
-
-      /* verify plan command */
-      assertEquals(
-          "There must be two lines: the 1st is writing plan to...,"
-              + " the 2nd is actual full path of plan file.",
-          2, outputs.size());
-      assertThat(outputs.get(1), containsString(planFileName));
-
-      /* get full path of plan file*/
-      final String planFileFullName = outputs.get(1);
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
 
       /* run execute command */
-      cmdLine = String.format(
+      final String cmdLine = String.format(
           "hdfs diskbalancer -%s %s",
           EXECUTE,
           planFileFullName);
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+      runCommand(cmdLine, hdfsConf, miniCluster);
     } finally {
       if (miniCluster != null) {
         miniCluster.shutdown();
@@ -159,6 +185,35 @@ public class TestDiskBalancerCommand {
     }
   }
 
+  private String runAndVerifyPlan(
+      final MiniDFSCluster miniCluster,
+      final Configuration hdfsConf) throws Exception {
+    String cmdLine = "";
+    List<String> outputs = null;
+    final DataNode dn = miniCluster.getDataNodes().get(0);
+
+    /* run plan command */
+    cmdLine = String.format(
+        "hdfs diskbalancer -%s %s",
+        PLAN,
+        dn.getDatanodeUuid());
+    outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+
+    /* get path of plan file*/
+    final String planFileName = dn.getDatanodeUuid();
+
+    /* verify plan command */
+    assertEquals(
+        "There must be two lines: the 1st is writing plan to...,"
+            + " the 2nd is actual full path of plan file.",
+        2, outputs.size());
+    assertThat(outputs.get(1), containsString(planFileName));
+
+    /* get full path of plan file*/
+    final String planFileFullName = outputs.get(1);
+    return planFileFullName;
+  }
+
   /* test basic report */
   @Test(timeout = 60000)
   public void testReportSimple() throws Exception {

