hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From a..@apache.org
Subject [31/50] [abbrv] hadoop git commit: HDDS-87: Fix test failures with uninitialized storageLocation field in storageReport. Contributed by Shashikant Banerjee
Date Tue, 22 May 2018 20:14:59 GMT
HDDS-87: Fix test failures with uninitialized storageLocation field in storageReport.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d2d9dbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d2d9dbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d2d9dbc

Branch: refs/heads/HDDS-48
Commit: 3d2d9dbcaa73fd72d614a8cf5a5be2806dd31537
Parents: c97df77
Author: Bharat Viswanadham <bharat@apache.org>
Authored: Mon May 21 08:01:51 2018 -0700
Committer: Bharat Viswanadham <bharat@apache.org>
Committed: Mon May 21 08:01:51 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/scm/TestUtils.java   | 35 +++++++++---
 .../hdds/scm/node/TestContainerPlacement.java   | 13 ++---
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 58 ++++++++++----------
 .../scm/node/TestSCMNodeStorageStatMap.java     | 22 +++++---
 .../ozone/container/common/TestEndPoint.java    | 32 ++++++-----
 5 files changed, 92 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index ab94ef6..5cf0a92 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -16,10 +16,13 @@
  */
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 
@@ -58,19 +61,35 @@ public final class TestUtils {
    * Create Node Report object.
    * @return SCMNodeReport
    */
-  public static SCMNodeReport createNodeReport() {
+  public static SCMNodeReport createNodeReport(List<SCMStorageReport> reports) {
     SCMNodeReport.Builder nodeReport = SCMNodeReport.newBuilder();
-    for (int i = 0; i < 1; i++) {
+    nodeReport.addAllStorageReport(reports);
+    return nodeReport.build();
+  }
+
+  /**
+   * Create SCM Storage Report object.
+   * @return list of SCMStorageReport
+   */
+  public static List<SCMStorageReport> createStorageReport(long capacity,
+      long used, long remaining, String path, StorageTypeProto type, String id,
+      int count) {
+    List<SCMStorageReport> reportList = new ArrayList<>();
+    for (int i = 0; i < count; i++) {
+      Preconditions.checkNotNull(path);
+      Preconditions.checkNotNull(id);
       SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      nodeReport.addStorageReport(i, srb.setStorageUuid("disk")
-          .setCapacity(100)
-          .setScmUsed(10)
-          .setRemaining(90)
-          .build());
+      srb.setStorageUuid(id).setStorageLocation(path).setCapacity(capacity)
+          .setScmUsed(used).setRemaining(remaining);
+      StorageTypeProto storageTypeProto =
+          type == null ? StorageTypeProto.DISK : type;
+      srb.setStorageType(storageTypeProto);
+      reportList.add(srb.build());
     }
-    return nodeReport.build();
+    return reportList;
   }
 
+
   /**
    * Get specified number of DatanodeDetails and registered them with node
    * manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 6f994a9..321e4e2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ReportState;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -140,13 +138,12 @@ public class TestContainerPlacement {
         TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
     try {
       for (DatanodeDetails datanodeDetails : datanodes) {
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(used).
-            setRemaining(remaining).build();
+        String id = UUID.randomUUID().toString();
+        String path = testDir.getAbsolutePath() + "/" + id;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, used, remaining, path, null, id, 1);
         nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
-            nrb.addStorageReport(srb).build(), reportState);
+            TestUtils.createNodeReport(reports), reportState);
       }
 
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 117c258..9fe38ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -281,9 +281,13 @@ public class TestNodeManager {
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    String dnId = datanodeDetails.getUuidString();
+    String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+    List<SCMStorageReport> reports =
+        TestUtils.createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       nodemanager.register(datanodeDetails.getProtoBufMessage(),
-          TestUtils.createNodeReport());
+          TestUtils.createNodeReport(reports));
       List<SCMCommand> command = nodemanager.sendHeartbeat(
           datanodeDetails.getProtoBufMessage(),
           null, reportState);
@@ -1012,14 +1016,14 @@ public class TestNodeManager {
       for (int x = 0; x < nodeCount; x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(used).
-            setRemaining(capacity - used).build();
+        String dnId = datanodeDetails.getUuidString();
+        long free = capacity - used;
+        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, used, free, storagePath,
+                null, dnId, 1);
         nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
-            nrb.addStorageReport(srb).build(), reportState);
+            TestUtils.createNodeReport(reports), reportState);
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
           100, 4 * 1000);
@@ -1055,21 +1059,21 @@ public class TestNodeManager {
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-          nodeManager);
+      DatanodeDetails datanodeDetails =
+          TestUtils.getDatanodeDetails(nodeManager);
       final long capacity = 2000;
       final long usedPerHeartbeat = 100;
-
+      String dnId = datanodeDetails.getUuidString();
       for (int x = 0; x < heartbeatCount; x++) {
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(x * usedPerHeartbeat)
-            .setRemaining(capacity - x * usedPerHeartbeat).build();
-        nrb.addStorageReport(srb);
-
-        nodeManager.sendHeartbeat(
-            datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
+        long scmUsed = x * usedPerHeartbeat;
+        long remaining = capacity - scmUsed;
+        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, scmUsed, remaining, storagePath,
+                null, dnId, 1);
+
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+            TestUtils.createNodeReport(reports), reportState);
         Thread.sleep(100);
       }
 
@@ -1145,14 +1149,12 @@ public class TestNodeManager {
       assertEquals(0, foundRemaining);
 
       // Send a new report to bring the dead node back to healthy
-      SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      srb.setStorageUuid(UUID.randomUUID().toString());
-      srb.setCapacity(capacity).setScmUsed(expectedScmUsed)
-          .setRemaining(expectedRemaining).build();
-      nrb.addStorageReport(srb);
-      nodeManager.sendHeartbeat(
-          datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
+      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+      List<SCMStorageReport> reports = TestUtils
+          .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
+              storagePath, null, dnId, 1);
+      nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+          TestUtils.createNodeReport(reports), reportState);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
       // Verify usage info should be updated.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 03bfbab..2fa786b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.*;
 import org.junit.rules.ExpectedException;
 
+import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
@@ -108,14 +109,17 @@ public class TestSCMNodeStorageStatMap {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     map.insertNewDatanode(key, value);
     Assert.assertTrue(map.isKnownDatanode(key));
-    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-    srb.setStorageUuid(UUID.randomUUID().toString());
-    srb.setCapacity(value.getCapacity().get())
-        .setScmUsed(value.getScmUsed().get()).
-        setRemaining(value.getRemaining().get()).build();
+    String storageId = UUID.randomUUID().toString();
+    String path =
+        GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
+    long capacity = value.getCapacity().get();
+    long used = value.getScmUsed().get();
+    long remaining = value.getRemaining().get();
+    List<SCMStorageReport> reports = TestUtils
+        .createStorageReport(capacity, used, remaining, path, null, storageId,
+            1);
     SCMNodeStorageStatMap.NodeReportStatus status =
-        map.processNodeReport(key, nrb.addStorageReport(srb).build());
+        map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(status,
         SCMNodeStorageStatMap.NodeReportStatus.ALL_IS_WELL);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index da39bb3..9ac1467 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -66,6 +66,7 @@ import static org.mockito.Mockito.mock;
 
 import java.io.File;
 import java.net.InetSocketAddress;
+import java.util.List;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
@@ -207,12 +208,12 @@ public class TestEndPoint {
   @Test
   public void testRegister() throws Exception {
     DatanodeDetails nodeToRegister = getDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(
-                 SCMTestUtils.getConf(), serverAddress, 1000)) {
+    try (EndpointStateMachine rpcEndPoint = createEndpoint(
+        SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredCmdResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage(),
-              TestUtils.createNodeReport(),
+          .register(nodeToRegister.getProtoBufMessage(), TestUtils
+                  .createNodeReport(
+                      getStorageReports(nodeToRegister.getUuidString())),
               createContainerReport(10, nodeToRegister));
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
@@ -220,11 +221,15 @@ public class TestEndPoint {
       Assert.assertNotNull(responseProto.getClusterID());
       Assert.assertEquals(10, scmServerImpl.
           getContainerCountsForDatanode(nodeToRegister));
-      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(
-          nodeToRegister));
+      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister));
     }
   }
 
+  private List<SCMStorageReport> getStorageReports(String id) {
+    String storagePath = testDir.getAbsolutePath() + "/" + id;
+    return TestUtils.createStorageReport(100, 10, 90, storagePath, null, id, 1);
+  }
+
   private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
       int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
     Configuration conf = SCMTestUtils.getConf();
@@ -234,7 +239,7 @@ public class TestEndPoint {
     rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
     when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
-        .createNodeReport());
+        .createNodeReport(getStorageReports(UUID.randomUUID().toString())));
     when(ozoneContainer.getContainerReport()).thenReturn(
         createContainerReport(10, null));
     RegisterEndpointTask endpointTask =
@@ -297,14 +302,11 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint =
              createEndpoint(SCMTestUtils.getConf(),
                  serverAddress, 1000)) {
-      SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      srb.setStorageUuid(UUID.randomUUID().toString());
-      srb.setCapacity(2000).setScmUsed(500).setRemaining(1500).build();
-      nrb.addStorageReport(srb);
+      String storageId = UUID.randomUUID().toString();
       SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(
-              dataNode.getProtoBufMessage(), nrb.build(), defaultReportState);
+          .sendHeartbeat(dataNode.getProtoBufMessage(),
+              TestUtils.createNodeReport(getStorageReports(storageId)),
+              defaultReportState);
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(0, responseProto.getCommandsCount());
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message