hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bot...@apache.org
Subject [11/50] [abbrv] hadoop git commit: HDDS-737. Introduce Incremental Container Report. Contributed by Nanda kumar.
Date Tue, 13 Nov 2018 00:22:31 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index c1409cb..5f419d3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -39,10 +39,10 @@ import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
 import org.apache.hadoop.hdds.scm.chillmode.SCMChillModeManager;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
-import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.replication
@@ -221,7 +221,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     CommandStatusReportHandler cmdStatusReportHandler =
         new CommandStatusReportHandler();
 
-    NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
+    NewNodeHandler newNodeHandler = new NewNodeHandler();
     StaleNodeHandler staleNodeHandler =
         new StaleNodeHandler(scmNodeManager, pipelineManager);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
@@ -231,8 +231,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
 
     ContainerReportHandler containerReportHandler =
-        new ContainerReportHandler(containerManager, scmNodeManager,
-            replicationStatus);
+        new ContainerReportHandler(scmNodeManager, pipelineManager,
+            containerManager, replicationStatus);
+
+    IncrementalContainerReportHandler incrementalContainerReportHandler =
+        new IncrementalContainerReportHandler(
+            pipelineManager, containerManager);
 
     PipelineActionHandler pipelineActionHandler =
         new PipelineActionHandler(pipelineManager);
@@ -258,13 +262,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     replicationManager = new ReplicationManager(containerPlacementPolicy,
         containerManager, eventQueue, commandWatcherLeaseManager);
 
-    // setup CloseContainer watcher
-    CloseContainerWatcher closeContainerWatcher =
-        new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-            SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager,
-            containerManager);
-    closeContainerWatcher.start(eventQueue);
-
     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);
     scmUsername = UserGroupInformation.getCurrentUser().getUserName();
@@ -282,6 +279,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
     eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
     eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
+    eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT,
+        incrementalContainerReportHandler);
     eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
     eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
     eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
index afa25e2..279acf0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.UUID;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
@@ -51,8 +50,6 @@ public class TestCommandStatusReportHandler implements EventPublisher {
   private static final Logger LOG = LoggerFactory
       .getLogger(TestCommandStatusReportHandler.class);
   private CommandStatusReportHandler cmdStatusReportHandler;
-  private String storagePath = GenericTestUtils.getRandomizedTempPath()
-      .concat("/" + UUID.randomUUID().toString());
 
   @Before
   public void setup() {
@@ -68,8 +65,6 @@ public class TestCommandStatusReportHandler implements EventPublisher {
         .emptyList());
     cmdStatusReportHandler.onMessage(report, this);
     assertFalse(logCapturer.getOutput().contains("Delete_Block_Status"));
-    assertFalse(logCapturer.getOutput().contains(
-        "Close_Container_Command_Status"));
     assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status"));
 
 
@@ -78,13 +73,9 @@ public class TestCommandStatusReportHandler implements EventPublisher {
     assertTrue(logCapturer.getOutput().contains("firing event of type " +
         "Delete_Block_Status"));
     assertTrue(logCapturer.getOutput().contains("firing event of type " +
-        "Close_Container_Command_Status"));
-    assertTrue(logCapturer.getOutput().contains("firing event of type " +
         "Replicate_Command_Status"));
 
     assertTrue(logCapturer.getOutput().contains("type: " +
-        "closeContainerCommand"));
-    assertTrue(logCapturer.getOutput().contains("type: " +
         "deleteBlocksCommand"));
     assertTrue(logCapturer.getOutput().contains("type: " +
         "replicateContainerCommand"));
@@ -119,11 +110,6 @@ public class TestCommandStatusReportHandler implements EventPublisher {
         .setType(Type.deleteBlocksCommand);
     reports.add(builder.build());
 
-    builder.setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand);
-    reports.add(builder.build());
-
     builder.setMsg("Not enough space")
         .setCmdId(HddsIdFactory.getLongId())
         .setStatus(CommandStatus.Status.FAILED)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 8d36d29..d5e68a8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
@@ -44,6 +43,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.assertj.core.util.Preconditions;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -139,18 +139,6 @@ public class MockNodeManager implements NodeManager {
   }
 
   /**
-   * Removes a data node from the management of this Node Manager.
-   *
-   * @param node - DataNode.
-   * @throws NodeNotFoundException
-   */
-  @Override
-  public void removeNode(DatanodeDetails node)
-      throws NodeNotFoundException {
-
-  }
-
-  /**
    * Gets all Live Datanodes that is currently communicating with SCM.
    *
    * @param nodestate - State of the node
@@ -248,8 +236,8 @@ public class MockNodeManager implements NodeManager {
    * @return Set of PipelineID
    */
   @Override
-  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
-    return node2PipelineMap.getPipelines(dnId);
+  public Set<PipelineID> getPipelines(DatanodeDetails dnId) {
+    return node2PipelineMap.getPipelines(dnId.getUuid());
   }
 
   /**
@@ -290,7 +278,8 @@ public class MockNodeManager implements NodeManager {
    * @param nodeReport
    */
   @Override
-  public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
+  public void processNodeReport(DatanodeDetails dnUuid,
+      NodeReportProto nodeReport) {
     // do nothing
   }
 
@@ -302,21 +291,13 @@ public class MockNodeManager implements NodeManager {
    *                        addDatanodeInContainerMap call.
    */
   @Override
-  public void setContainersForDatanode(UUID uuid, Set<ContainerID> containerIds)
-      throws SCMException {
-    node2ContainerMap.setContainersForDatanode(uuid, containerIds);
-  }
-
-  /**
-   * Process containerReport received from datanode.
-   * @param uuid - DataonodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  @Override
-  public ReportResult<ContainerID> processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds) {
-    return node2ContainerMap.processReport(uuid, containerIds);
+  public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
+      throws NodeNotFoundException {
+    try {
+      node2ContainerMap.setContainersForDatanode(uuid.getUuid(), containerIds);
+    } catch (SCMException e) {
+      throw new NodeNotFoundException(e.getMessage());
+    }
   }
 
   /**
@@ -325,21 +306,8 @@ public class MockNodeManager implements NodeManager {
    * @return - set of containerIDs
    */
   @Override
-  public Set<ContainerID> getContainers(UUID uuid) {
-    return node2ContainerMap.getContainers(uuid);
-  }
-
-  /**
-   * Insert a new datanode with set of containerIDs for containers available
-   * on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  @Override
-  public void addDatanodeInContainerMap(UUID uuid,
-      Set<ContainerID> containerIDs) throws SCMException {
-    node2ContainerMap.insertNewDatanode(uuid, containerIDs);
+  public Set<ContainerID> getContainers(DatanodeDetails uuid) {
+    return node2ContainerMap.getContainers(uuid.getUuid());
   }
 
   // Returns the number of commands that is queued to this node manager.
@@ -393,6 +361,12 @@ public class MockNodeManager implements NodeManager {
   @Override
   public RegisteredCommand register(DatanodeDetails datanodeDetails,
       NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
+    try {
+      node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
+          Collections.emptySet());
+    } catch (SCMException e) {
+      e.printStackTrace();
+    }
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index faae6f5..a2eba92 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.container.replication
     .ReplicationActivityStatus;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
@@ -69,7 +70,7 @@ public class TestContainerReportHandler implements EventPublisher {
   //TODO: Rewrite it
   @Ignore
   @Test
-  public void test() throws IOException {
+  public void test() throws IOException, NodeNotFoundException {
     String testDir = GenericTestUtils.getTempPath(
         this.getClass().getSimpleName());
     //GIVEN
@@ -85,17 +86,17 @@ public class TestContainerReportHandler implements EventPublisher {
         new ReplicationActivityStatus();
 
     ContainerReportHandler reportHandler =
-        new ContainerReportHandler(containerManager, nodeManager,
-            replicationActivityStatus);
+        new ContainerReportHandler(nodeManager, pipelineManager,
+            containerManager, replicationActivityStatus);
 
     DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails dn2 = TestUtils.randomDatanodeDetails();
     DatanodeDetails dn3 = TestUtils.randomDatanodeDetails();
     DatanodeDetails dn4 = TestUtils.randomDatanodeDetails();
-    nodeManager.addDatanodeInContainerMap(dn1.getUuid(), new HashSet<>());
-    nodeManager.addDatanodeInContainerMap(dn2.getUuid(), new HashSet<>());
-    nodeManager.addDatanodeInContainerMap(dn3.getUuid(), new HashSet<>());
-    nodeManager.addDatanodeInContainerMap(dn4.getUuid(), new HashSet<>());
+    nodeManager.setContainers(dn1, new HashSet<>());
+    nodeManager.setContainers(dn2, new HashSet<>());
+    nodeManager.setContainers(dn3, new HashSet<>());
+    nodeManager.setContainers(dn4, new HashSet<>());
 
     ContainerInfo cont1 = containerManager
         .allocateContainer(ReplicationType.STAND_ALONE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 446eb58..5c4617c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -24,6 +24,8 @@ import java.util.Set;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -96,6 +98,7 @@ public class TestContainerStateManager {
       throws ContainerNotFoundException {
     ContainerReplica replica = ContainerReplica.newBuilder()
         .setContainerID(cont.containerID())
+        .setContainerState(ContainerReplicaProto.State.CLOSED)
         .setDatanodeDetails(node)
         .build();
     containerStateManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index be4cce6..fad67b8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -30,8 +29,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -47,8 +44,6 @@ import org.junit.rules.ExpectedException;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
@@ -187,10 +182,12 @@ public class TestSCMContainerManager {
 
     containerManager.updateContainerReplica(contInfo.containerID(),
         ContainerReplica.newBuilder().setContainerID(contInfo.containerID())
-        .setDatanodeDetails(dn1).build());
+            .setContainerState(ContainerReplicaProto.State.CLOSED)
+            .setDatanodeDetails(dn1).build());
     containerManager.updateContainerReplica(contInfo.containerID(),
         ContainerReplica.newBuilder().setContainerID(contInfo.containerID())
-        .setDatanodeDetails(dn2).build());
+            .setContainerState(ContainerReplicaProto.State.CLOSED)
+            .setDatanodeDetails(dn2).build());
 
     Assert.assertEquals(2,
         containerManager.getContainerReplicas(
@@ -240,102 +237,6 @@ public class TestSCMContainerManager {
             HddsProtos.LifeCycleEvent.CREATED);
   }
 
-  @Test
-  public void testFullContainerReport() throws Exception {
-    ContainerInfo info = createContainer();
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    List<ContainerReplicaProto> reports =
-        new ArrayList<>();
-    ContainerReplicaProto.Builder ciBuilder =
-        ContainerReplicaProto.newBuilder();
-    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
-        .setSize(5368709120L)
-        .setUsed(2000000000L)
-        .setKeyCount(100000000L)
-        .setReadCount(100000000L)
-        .setWriteCount(100000000L)
-        .setReadBytes(2000000000L)
-        .setWriteBytes(2000000000L)
-        .setContainerID(info.getContainerID())
-        .setState(ContainerReplicaProto.State.CLOSED)
-        .setDeleteTransactionId(0);
-
-    reports.add(ciBuilder.build());
-
-    ContainerReportsProto.Builder crBuilder = ContainerReportsProto
-        .newBuilder();
-    crBuilder.addAllReports(reports);
-
-    containerManager.processContainerReports(
-        datanodeDetails, crBuilder.build());
-
-    ContainerInfo updatedContainer =
-        containerManager.getContainer(info.containerID());
-    Assert.assertEquals(100000000L,
-        updatedContainer.getNumberOfKeys());
-    Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
-
-    for (ContainerReplicaProto c : reports) {
-     Assert.assertEquals(containerManager.getContainerReplicas(
-         ContainerID.valueof(c.getContainerID())).size(), 1);
-    }
-
-    containerManager.processContainerReports(TestUtils.randomDatanodeDetails(),
-        crBuilder.build());
-    for (ContainerReplicaProto c : reports) {
-      Assert.assertEquals(containerManager.getContainerReplicas(
-              ContainerID.valueof(c.getContainerID())).size(), 2);
-    }
-  }
-
-  @Test
-  public void testListContainerAfterReport() throws Exception {
-    ContainerInfo info1 = createContainer();
-    ContainerInfo info2 = createContainer();
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    List<ContainerReplicaProto> reports =
-        new ArrayList<>();
-    ContainerReplicaProto.Builder ciBuilder =
-        ContainerReplicaProto.newBuilder();
-    long cID1 = info1.getContainerID();
-    long cID2 = info2.getContainerID();
-    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
-        .setSize(1000000000L)
-        .setUsed(987654321L)
-        .setKeyCount(100000000L)
-        .setReadBytes(1000000000L)
-        .setWriteBytes(1000000000L)
-        .setContainerID(cID1)
-        .setState(ContainerReplicaProto.State.CLOSED);
-    reports.add(ciBuilder.build());
-
-    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea54a9")
-        .setSize(1000000000L)
-        .setUsed(123456789L)
-        .setKeyCount(200000000L)
-        .setReadBytes(3000000000L)
-        .setWriteBytes(4000000000L)
-        .setContainerID(cID2);
-    reports.add(ciBuilder.build());
-
-    ContainerReportsProto.Builder crBuilder = ContainerReportsProto
-        .newBuilder();
-    crBuilder.addAllReports(reports);
-
-    containerManager.processContainerReports(
-        datanodeDetails, crBuilder.build());
-
-    List<ContainerInfo> list = containerManager.listContainer(
-        ContainerID.valueof(1), 50);
-    Assert.assertEquals(2, list.stream().filter(
-        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count());
-    Assert.assertEquals(300000000L, list.stream().filter(
-        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2)
-        .mapToLong(x -> x.getNumberOfKeys()).sum());
-    Assert.assertEquals(1111111110L, list.stream().filter(
-        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2)
-        .mapToLong(x -> x.getUsedBytes()).sum());
-  }
 
   @Test
   public void testCloseContainer() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index 571a5fb..0be279d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -28,7 +28,10 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -36,8 +39,10 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationRequestToRepeat;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.replication
+    .ReplicationManager.ReplicationRequestToRepeat;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.lease.LeaseManager;
@@ -81,6 +86,7 @@ public class TestReplicationManager {
       listOfDatanodeDetails.add(dd);
       listOfContainerReplica.add(ContainerReplica.newBuilder()
           .setContainerID(ContainerID.valueof(i))
+          .setContainerState(ContainerReplicaProto.State.CLOSED)
           .setDatanodeDetails(dd).build());
     });
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 51099f4..c1f2c69 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -30,8 +30,12 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -42,10 +46,11 @@ import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .NodeReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -93,7 +98,7 @@ public class TestDeadNodeHandler {
   }
 
   @Test
-  public void testOnMessage() throws IOException {
+  public void testOnMessage() throws IOException, NodeNotFoundException {
     //GIVEN
     DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
@@ -269,11 +274,9 @@ public class TestDeadNodeHandler {
         container1.containerID(), HddsProtos.LifeCycleEvent.CREATED);
     TestUtils.closeContainer(containerManager, container1.containerID());
 
-    registerReplicas(dn1, container1);
-
     deadNodeHandler.onMessage(dn1, eventQueue);
     Assert.assertTrue(logCapturer.getOutput().contains(
-        "Exception while removing container replica "));
+        "DeadNode event for a unregistered node"));
   }
 
   private void registerReplicas(ContainerManager containerManager,
@@ -283,6 +286,7 @@ public class TestDeadNodeHandler {
       containerManager.updateContainerReplica(
           new ContainerID(container.getContainerID()),
           ContainerReplica.newBuilder()
+              .setContainerState(ContainerReplicaProto.State.OPEN)
               .setContainerID(container.containerID())
               .setDatanodeDetails(datanode).build());
     }
@@ -290,9 +294,9 @@ public class TestDeadNodeHandler {
 
   private void registerReplicas(DatanodeDetails datanode,
       ContainerInfo... containers)
-      throws SCMException {
+      throws NodeNotFoundException {
     nodeManager
-        .addDatanodeInContainerMap(datanode.getUuid(),
+        .setContainers(datanode,
             Arrays.stream(containers)
                 .map(container -> new ContainerID(container.getContainerID()))
                 .collect(Collectors.toSet()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
deleted file mode 100644
index ec13534..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container;
-
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
-    .CloseContainerStatus;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
-    .CloseContainerRetryableReq;
-import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.EventWatcher;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.when;
-
-/**
- * Test class for {@link CloseContainerWatcher}.
- * */
-public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestCloseContainerWatcher.class);
-  private static EventWatcher<CloseContainerRetryableReq, CloseContainerStatus>
-      watcher;
-  private static LeaseManager<Long> leaseManager;
-  private static SCMContainerManager containerManager = Mockito
-      .mock(SCMContainerManager.class);
-  private static EventQueue queue;
-  @Rule
-  public Timeout timeout = new Timeout(1000*15);
-
-  @After
-  public void stop() {
-    leaseManager.shutdown();
-    queue.close();
-  }
-
-  /*
-   * This test will test watcher for Failure status event.
-   * */
-  @Test
-  public void testWatcherForFailureStatusEvent() throws
-      InterruptedException, IOException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.closeContainerCommand).build();
-
-    // File events to watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*4L);
-    // validation
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " +
-        "containerId: " + id1 + " executed"));
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " +
-        "containerId: " + id2 + " executed"));
-    assertTrue(
-        testLogger.getOutput().contains("Handling closeContainerEvent " +
-            "for containerId: id=" + id1));
-    assertTrue(testLogger.getOutput().contains("Handling closeContainerEvent " +
-        "for containerId: id=" + id2));
-
-  }
-
-  @Test
-  public void testWatcherForPendingStatusEvent() throws
-      InterruptedException, IOException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.PENDING)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.PENDING)
-        .setType(Type.closeContainerCommand).build();
-
-    // File events to watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*2L);
-    // validation
-    assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id1 + " executed"));
-    assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id2 + " executed"));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-
-  }
-
-  @Test
-  public void testWatcherForExecutedStatusEvent()
-      throws IOException, InterruptedException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    // When both of the pending event are executed successfully by DataNode
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand).build();
-    // File events to watcher
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS,
-        new CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS,
-        new CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*3L);
-    // validation
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id1 + " executed"));
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id2 + " executed"));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-  }
-
-  private void setupWatcher(long time) {
-    leaseManager = new LeaseManager<>("TestCloseContainerWatcher#LeaseManager",
-        time);
-    leaseManager.start();
-    watcher = new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerManager);
-    queue = new EventQueue();
-    watcher.start(queue);
-  }
-
-  /*
-   * This test will fire two retryable closeContainer events. Both will timeout.
-   * First event container will be open at time of handling so it should be
-   * sent back to appropriate handler. Second event container will be closed,
-   * so it should not be retried.
-   * */
-  @Test
-  public void testWatcherRetryableTimeoutHandling() throws InterruptedException,
-      IOException {
-
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    setupWatcher(1000L);
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, false);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    testLogger.clearOutput();
-
-    // File events to watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-
-    Thread.sleep(1000L + 10);
-
-    // validation
-    assertTrue(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-  }
-
-
-  private void setupMock(long id1, long id2, boolean isOpen)
-      throws IOException {
-    ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class);
-    ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class);
-    when(containerManager.getContainer(ContainerID.valueof(id1)))
-        .thenReturn(containerInfo);
-    when(containerManager.getContainer(ContainerID.valueof(id2)))
-        .thenReturn(containerInfo2);
-    when(containerInfo.isOpen()).thenReturn(true);
-    when(containerInfo2.isOpen()).thenReturn(isOpen);
-  }
-
-  @Override
-  public void onMessage(ContainerID containerID, EventPublisher publisher) {
-    LOG.info("Handling closeContainerEvent for containerId: {}", containerID);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 3a2ce78..a089621 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -413,17 +413,13 @@ public class TestEndPoint {
           serverAddress, 3000);
       Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
       assertNotNull(map);
-      assertEquals("Should have 3 objects", 3, map.size());
-      assertTrue(map.containsKey(Long.valueOf(1)));
+      assertEquals("Should have 2 objects", 2, map.size());
       assertTrue(map.containsKey(Long.valueOf(2)));
       assertTrue(map.containsKey(Long.valueOf(3)));
-      assertTrue(map.get(Long.valueOf(1)).getType()
-          .equals(Type.closeContainerCommand));
       assertTrue(map.get(Long.valueOf(2)).getType()
           .equals(Type.replicateContainerCommand));
       assertTrue(
           map.get(Long.valueOf(3)).getType().equals(Type.deleteBlocksCommand));
-      assertTrue(map.get(Long.valueOf(1)).getStatus().equals(Status.PENDING));
       assertTrue(map.get(Long.valueOf(2)).getStatus().equals(Status.PENDING));
       assertTrue(map.get(Long.valueOf(3)).getStatus().equals(Status.PENDING));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index fbc3420..2d786c5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.CommandQueue;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
@@ -76,19 +74,6 @@ public class ReplicationNodeManagerMock implements NodeManager {
   }
 
   /**
-   * Removes a data node from the management of this Node Manager.
-   *
-   * @param node - DataNode.
-   * @throws NodeNotFoundException
-   */
-  @Override
-  public void removeNode(DatanodeDetails node)
-      throws NodeNotFoundException {
-    nodeStateMap.remove(node);
-
-  }
-
-  /**
    * Gets all Live Datanodes that is currently communicating with SCM.
    *
    * @param nodestate - State of the node
@@ -170,7 +155,7 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * @return Set of PipelineID
    */
   @Override
-  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
+  public Set<PipelineID> getPipelines(DatanodeDetails dnId) {
     throw new UnsupportedOperationException("Not yet implemented");
   }
 
@@ -196,24 +181,12 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * Update set of containers available on a datanode.
    * @param uuid - DatanodeID
    * @param containerIds - Set of containerIDs
-   * @throws SCMException - if datanode is not known. For new datanode use
-   *                        addDatanodeInContainerMap call.
+   * @throws NodeNotFoundException - if datanode is not known; the datanode
+   *                                 must be registered with SCM first.
    */
   @Override
-  public void setContainersForDatanode(UUID uuid, Set<ContainerID> containerIds)
-      throws SCMException {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Process containerReport received from datanode.
-   * @param uuid - DataonodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  @Override
-  public ReportResult<ContainerID> processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds) {
+  public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
+      throws NodeNotFoundException {
     throw new UnsupportedOperationException("Not yet implemented");
   }
 
@@ -223,20 +196,7 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * @return - set of containerIDs
    */
   @Override
-  public Set<ContainerID> getContainers(UUID uuid) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Insert a new datanode with set of containerIDs for containers available
-   * on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  @Override
-  public void addDatanodeInContainerMap(UUID uuid,
-      Set<ContainerID> containerIDs) throws SCMException {
+  public Set<ContainerID> getContainers(DatanodeDetails uuid) {
     throw new UnsupportedOperationException("Not yet implemented");
   }
 
@@ -329,7 +289,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * @param nodeReport
    */
   @Override
-  public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
+  public void processNodeReport(DatanodeDetails dnUuid,
+                                NodeReportProto nodeReport) {
     // do nothing.
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 03c99ef..143c4e3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -21,14 +21,14 @@ import java.util.UUID;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -39,8 +39,6 @@ import java.util.List;
 import java.util.NavigableSet;
 import java.util.concurrent.TimeoutException;
 
-import org.slf4j.event.Level;
-
 /**
  * Tests for ContainerStateManager.
  */
@@ -317,9 +315,6 @@ public class TestContainerStateManagerIntegration {
 
   @Test
   public void testReplicaMap() throws Exception {
-    GenericTestUtils.setLogLevel(ContainerStateMap.getLOG(), Level.DEBUG);
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(ContainerStateMap.getLOG());
     DatanodeDetails dn1 = DatanodeDetails.newBuilder().setHostName("host1")
         .setIpAddress("1.1.1.1")
         .setUuid(UUID.randomUUID().toString()).build();
@@ -347,10 +342,12 @@ public class TestContainerStateManagerIntegration {
     // Test 2: Add replica nodes and then test
     ContainerReplica replicaOne = ContainerReplica.newBuilder()
         .setContainerID(id)
+        .setContainerState(ContainerReplicaProto.State.OPEN)
         .setDatanodeDetails(dn1)
         .build();
     ContainerReplica replicaTwo = ContainerReplica.newBuilder()
         .setContainerID(id)
+        .setContainerState(ContainerReplicaProto.State.OPEN)
         .setDatanodeDetails(dn2)
         .build();
     containerStateManager.updateContainerReplica(id, replicaOne);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index 74cbea4..88b5f7f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -93,7 +93,7 @@ public class TestNode2PipelineMap {
 
     // get pipeline details by dnid
     Set<PipelineID> pipelines = scm.getScmNodeManager()
-        .getPipelineByDnID(dns.get(0).getUuid());
+        .getPipelines(dns.get(0));
     Assert.assertEquals(1, pipelines.size());
     pipelines.forEach(p -> Assert.assertEquals(p,
         ratisContainer.getPipeline().getId()));
@@ -116,7 +116,7 @@ public class TestNode2PipelineMap {
     pipelineManager.finalizePipeline(ratisContainer.getPipeline().getId());
     pipelineManager.removePipeline(ratisContainer.getPipeline().getId());
     pipelines = scm.getScmNodeManager()
-        .getPipelineByDnID(dns.get(0).getUuid());
+        .getPipelines(dns.get(0));
     Assert.assertEquals(0, pipelines.size());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 42d3063..6121a65 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -115,8 +115,8 @@ public class TestPipelineClose {
     pipelineManager.removePipeline(pipeline1.getId());
     for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
       // Assert that the pipeline has been removed from Node2PipelineMap as well
-      Assert.assertEquals(scm.getScmNodeManager().getPipelineByDnID(
-          dn.getUuid()).size(), 0);
+      Assert.assertEquals(scm.getScmNodeManager().getPipelines(
+          dn).size(), 0);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c80f753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index c49a98b..5f57e7b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -243,8 +243,10 @@ public class TestBlockDeletion {
     ContainerReportsProto dummyReport = dummyReportsBuilder.build();
 
     logCapturer.clearOutput();
-    scm.getContainerManager().processContainerReports(
-        cluster.getHddsDatanodes().get(0).getDatanodeDetails(), dummyReport);
+    cluster.getHddsDatanodes().get(0)
+        .getDatanodeStateMachine().getContext().addReport(dummyReport);
+    cluster.getHddsDatanodes().get(0)
+        .getDatanodeStateMachine().triggerHeartbeat();
     // wait for event to be handled by event handler
     Thread.sleep(1000);
     String output = logCapturer.getOutput();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message