hadoop-common-commits mailing list archives

From cnaur...@apache.org
Subject [12/50] [abbrv] hadoop git commit: HDFS-10737. disk balancer add volume path to report command. Contributed by Yuanbo Liu.
Date Thu, 18 Aug 2016 20:27:14 GMT
HDFS-10737. disk balancer add volume path to report command. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f29f423
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f29f423
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f29f423

Branch: refs/heads/HADOOP-13345
Commit: 9f29f423e426e2d42e650cbed88e46c1c29a2a63
Parents: d677b68
Author: Anu Engineer <aengineer@apache.org>
Authored: Mon Aug 15 09:47:30 2016 -0700
Committer: Anu Engineer <aengineer@apache.org>
Committed: Mon Aug 15 09:47:30 2016 -0700

----------------------------------------------------------------------
 .../server/diskbalancer/command/Command.java    | 35 ++++++++++++++
 .../diskbalancer/command/PlanCommand.java       | 34 --------------
 .../diskbalancer/command/ReportCommand.java     |  4 +-
 .../command/TestDiskBalancerCommand.java        | 48 ++++++++++++++++++++
 4 files changed, 86 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index de77365..3110c1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -33,13 +33,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -421,6 +425,37 @@ public abstract class Command extends Configured {
   }
 
   /**
+   * Reads the physical path of the disks we are balancing. This is needed
+   * to make the disk balancer human friendly; it is not used in balancing.
+   *
+   * @param node - Disk Balancer Node.
+   */
+  protected void populatePathNames(
+      DiskBalancerDataNode node) throws IOException {
+    // If the cluster is a local file system, there is no need to
+    // make an RPC call to the DataNode.
+    if (getClusterURI().getScheme().startsWith("file")) {
+      return;
+    }
+    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
+    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
+    String volumeNameJson = dnClient.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+    ObjectMapper mapper = new ObjectMapper();
+
+    @SuppressWarnings("unchecked")
+    Map<String, String> volumeMap =
+        mapper.readValue(volumeNameJson, HashMap.class);
+    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
+      for (DiskBalancerVolume vol : set.getVolumes()) {
+        if (volumeMap.containsKey(vol.getUuid())) {
+          vol.setPath(volumeMap.get(vol.getUuid()));
+        }
+      }
+    }
+  }
+
+  /**
    * Set top number of nodes to be processed.
    * */
   public void setTopNodes(int topNodes) {

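For context, the new Command#populatePathNames() asks the DataNode for the
DISKBALANCER_VOLUME_NAME setting, which arrives as a JSON map of volume UUID
to physical path, and copies each path onto the matching DiskBalancerVolume.
A minimal standalone sketch of just that decoding step, using the same
Jackson 1.x ObjectMapper as the patch (the class name and JSON payload are
invented for illustration; a real DataNode returns one entry per volume):

    import java.util.HashMap;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    public class VolumePathDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical payload, shaped like the DataNode's reply:
        // volume UUID -> physical storage path.
        String volumeNameJson =
            "{\"a87654a9-54c7-4693-8dd9-c9c7021dc340\":\"/dfs/data/data1\"}";
        ObjectMapper mapper = new ObjectMapper();
        @SuppressWarnings("unchecked")
        Map<String, String> volumeMap =
            mapper.readValue(volumeNameJson, HashMap.class);
        // populatePathNames() performs this lookup per DiskBalancerVolume;
        // here we simply print each mapping.
        for (Map.Entry<String, String> e : volumeMap.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }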
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 3159312..72ad2c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -24,23 +24,13 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
-    .DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 /**
  * Class that implements Plan Command.
@@ -158,30 +148,6 @@ public class PlanCommand extends Command {
     }
   }
 
-  /**
-   * Reads the physical path of the disks we are balancing. This is needed
-   * to make the disk balancer human friendly; it is not used in balancing.
-   *
-   * @param node - Disk Balancer Node.
-   */
-  private void populatePathNames(DiskBalancerDataNode node) throws IOException {
-    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
-    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
-    String volumeNameJson = dnClient.getDiskBalancerSetting(
-        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
-    ObjectMapper mapper = new ObjectMapper();
-
-    @SuppressWarnings("unchecked")
-    Map<String, String> volumeMap =
-        mapper.readValue(volumeNameJson, HashMap.class);
-    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
-      for (DiskBalancerVolume vol : set.getVolumes()) {
-        if (volumeMap.containsKey(vol.getUuid())) {
-          vol.setPath(volumeMap.get(vol.getUuid()));
-        }
-      }
-    }
-  }
 
   /**
    * Gets extended help for this command.

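Note that populatePathNames() moves from PlanCommand into the shared Command
base class above (now protected instead of private) so that ReportCommand can
reuse it; the only behavioral change in the move is the new guard that skips
the DataNode RPC when the cluster URI uses the file scheme.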
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
index 9f8e399..18dd77e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
@@ -130,7 +130,7 @@ public class ReportCommand extends Command {
   }
 
   private void handleNodeReport(final CommandLine cmd, StrBuilder result,
-      final String nodeFormat, final String volumeFormat) {
+      final String nodeFormat, final String volumeFormat) throws Exception {
     String outputLine = "";
     /*
      * get value that identifies a DataNode from command line, it could be UUID,
@@ -152,6 +152,8 @@ public class ReportCommand extends Command {
       final String trueStr = "True";
       final String falseStr = "False";
       DiskBalancerDataNode dbdn = getNode(nodeVal);
+      // Get the storage paths of the DataNode's volumes.
+      populatePathNames(dbdn);
 
       if (dbdn == null) {
         outputLine = String.format(

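With handleNodeReport() now calling populatePathNames(), a per-node report
includes each volume's physical storage path. Based on the new test below,
the command line looks like this (the DataNode UUID is illustrative):

    hdfs diskbalancer -report -node a87654a9-54c7-4693-8dd9-c9c7021dc340

and each volume line of the report carries the storage directory (e.g.
/dfs/data/data1) alongside the storage type and the data-density figures.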
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index e55c418..0d24f28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
+
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
@@ -50,6 +51,7 @@ import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
 import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
 import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
 import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.REPORT;
 
 import org.junit.Rule;
 import org.junit.rules.ExpectedException;
@@ -262,6 +264,41 @@ public class TestDiskBalancerCommand {
   }
 
   @Test(timeout = 60000)
+  public void testReportNodeWithoutJson() throws Exception {
+    String dataNodeUuid = cluster.getDataNodes().get(0).getDatanodeUuid();
+    final String planArg = String.format("-%s -%s %s",
+        REPORT, NODE, dataNodeUuid);
+    final String cmdLine = String
+        .format(
+            "hdfs diskbalancer %s", planArg);
+    List<String> outputs = runCommand(cmdLine, cluster);
+
+    assertThat(
+        outputs.get(0),
+        containsString("Processing report command"));
+    assertThat(
+        outputs.get(1),
+        is(allOf(containsString("Reporting volume information for DataNode"),
+            containsString(dataNodeUuid))));
+    assertThat(
+        outputs.get(2),
+        is(allOf(containsString(dataNodeUuid),
+            containsString("2 volumes with node data density 0.00"))));
+    assertThat(
+        outputs.get(3),
+        is(allOf(containsString("DISK"),
+            containsString("/dfs/data/data1"),
+            containsString("0.00"),
+            containsString("1.00"))));
+    assertThat(
+        outputs.get(4),
+        is(allOf(containsString("DISK"),
+            containsString("/dfs/data/data2"),
+            containsString("0.00"),
+            containsString("1.00"))));
+  }
+
+  @Test(timeout = 60000)
   public void testReadClusterFromJson() throws Exception {
     ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson,
         conf);
@@ -283,6 +320,17 @@ public class TestDiskBalancerCommand {
     runCommand(cmdLine, cluster);
   }
 
+  /* test -plan  DataNodeID */
+  @Test(timeout = 60000)
+  public void testPlanJsonNode() throws Exception {
+    final String planArg = String.format("-%s %s", PLAN,
+        "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+    final String cmdLine = String
+        .format(
+            "hdfs diskbalancer %s", planArg);
+    runCommand(cmdLine);
+  }
+
   /* Test that illegal arguments are handled correctly*/
   @Test(timeout = 60000)
   public void testIllegalArgument() throws Exception {


