hadoop-common-commits mailing list archives

From sun...@apache.org
Subject [09/51] [partial] hadoop git commit: HADOOP-15791. Remove Ozone related sources from the 3.2 branch. Contributed by Elek, Marton.
Date Fri, 05 Oct 2018 12:56:04 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index f3cd4ea..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.scm.server.
-    SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ContainerReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .NodeReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
-
-/**
- * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
- */
-public class TestSCMDatanodeHeartbeatDispatcher {
-
-
-  @Test
-  public void testNodeReportDispatcher() throws IOException {
-
-    AtomicInteger eventReceived = new AtomicInteger();
-
-    NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
-
-    SCMDatanodeHeartbeatDispatcher dispatcher =
-        new SCMDatanodeHeartbeatDispatcher(Mockito.mock(NodeManager.class),
-            new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event, NODE_REPORT);
-            eventReceived.incrementAndGet();
-            Assert.assertEquals(nodeReport,
-                ((NodeReportFromDatanode)payload).getReport());
-
-          }
-        });
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-
-    SCMHeartbeatRequestProto heartbeat =
-        SCMHeartbeatRequestProto.newBuilder()
-        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-        .setNodeReport(nodeReport)
-        .build();
-    dispatcher.dispatch(heartbeat);
-    Assert.assertEquals(1, eventReceived.get());
-
-
-  }
-
-  @Test
-  public void testContainerReportDispatcher() throws IOException {
-
-
-    AtomicInteger eventReceived = new AtomicInteger();
-
-    ContainerReportsProto containerReport =
-        ContainerReportsProto.getDefaultInstance();
-    CommandStatusReportsProto commandStatusReport =
-        CommandStatusReportsProto.getDefaultInstance();
-
-    SCMDatanodeHeartbeatDispatcher dispatcher =
-        new SCMDatanodeHeartbeatDispatcher(Mockito.mock(NodeManager.class),
-            new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertTrue(
-                event.equals(CONTAINER_REPORT)
-                    || event.equals(CMD_STATUS_REPORT));
-
-            if (payload instanceof ContainerReportFromDatanode) {
-              Assert.assertEquals(containerReport,
-                  ((ContainerReportFromDatanode) payload).getReport());
-            }
-            if (payload instanceof CommandStatusReportFromDatanode) {
-              Assert.assertEquals(commandStatusReport,
-                  ((CommandStatusReportFromDatanode) payload).getReport());
-            }
-            eventReceived.incrementAndGet();
-          }
-        });
-
-    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-
-    SCMHeartbeatRequestProto heartbeat =
-        SCMHeartbeatRequestProto.newBuilder()
-            .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
-            .setContainerReport(containerReport)
-            .addCommandStatusReports(commandStatusReport)
-            .build();
-    dispatcher.dispatch(heartbeat);
-    Assert.assertEquals(2, eventReceived.get());
-
-
-  }
-
-}
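
For reference, the deleted test above relies on a counting-publisher pattern: a throwaway EventPublisher that asserts the event type, checks the payload, and increments a counter, which the test then inspects after dispatch. A minimal, self-contained sketch of that pattern, with hypothetical stand-ins for the HDDS Publisher and dispatcher (an illustration only, not the project's API):

import java.util.concurrent.atomic.AtomicInteger;

public final class CountingPublisherSketch {

  // Hypothetical stand-in for org.apache.hadoop.hdds.server.events.EventPublisher.
  interface Publisher {
    <P> void fireEvent(String eventType, P payload);
  }

  // Hypothetical stand-in for the dispatcher: routes one report per heartbeat.
  static void dispatch(Publisher publisher, String nodeReport) {
    publisher.fireEvent("NODE_REPORT", nodeReport);
  }

  public static void main(String[] args) {
    AtomicInteger received = new AtomicInteger();
    Publisher counting = new Publisher() {
      @Override
      public <P> void fireEvent(String eventType, P payload) {
        // The real test also asserts on the payload; here we only count.
        if ("NODE_REPORT".equals(eventType)) {
          received.incrementAndGet();
        }
      }
    };
    dispatch(counting, "node-report");
    if (received.get() != 1) {
      throw new AssertionError("expected exactly one NODE_REPORT event");
    }
  }
}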

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
deleted file mode 100644
index 56c3830..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container;
-
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
-    .CloseContainerStatus;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
-    .CloseContainerRetryableReq;
-import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.server.events.EventWatcher;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.when;
-
-/**
- * Test class for {@link CloseContainerWatcher}.
- * */
-public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestCloseContainerWatcher.class);
-  private static EventWatcher<CloseContainerRetryableReq, CloseContainerStatus>
-      watcher;
-  private static LeaseManager<Long> leaseManager;
-  private static ContainerMapping containerMapping = Mockito
-      .mock(ContainerMapping.class);
-  private static EventQueue queue;
-  @Rule
-  public Timeout timeout = new Timeout(1000*15);
-
-  @After
-  public void stop() {
-    leaseManager.shutdown();
-    queue.close();
-  }
-
-  /*
-   * Tests the watcher for the FAILED status event.
-   * */
-  @Test
-  public void testWatcherForFailureStatusEvent() throws
-      InterruptedException, IOException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.closeContainerCommand).build();
-
-    // Fire events to the watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*4L);
-    // validation
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " +
-        "containerId: " + id1 + " executed"));
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand for " +
-        "containerId: " + id2 + " executed"));
-    assertTrue(
-        testLogger.getOutput().contains("Handling closeContainerEvent " +
-            "for containerId: id=" + id1));
-    assertTrue(testLogger.getOutput().contains("Handling closeContainerEvent " +
-        "for containerId: id=" + id2));
-
-  }
-
-  @Test
-  public void testWatcherForPendingStatusEvent() throws
-      InterruptedException, IOException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.PENDING)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.PENDING)
-        .setType(Type.closeContainerCommand).build();
-
-    // Fire events to the watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS, new
-        CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*2L);
-    // validation
-    assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id1 + " executed"));
-    assertFalse(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id2 + " executed"));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-
-  }
-
-  @Test
-  public void testWatcherForExecutedStatusEvent()
-      throws IOException, InterruptedException {
-    setupWatcher(90000L);
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, true);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    GenericTestUtils.LogCapturer watcherLogger = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerWatcher.LOG);
-    GenericTestUtils.setLogLevel(CloseContainerWatcher.LOG, Level.TRACE);
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-
-    // When both of the pending events are executed successfully by the DataNode
-    CommandStatus cmdStatus1 = CommandStatus.newBuilder()
-        .setCmdId(id1)
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand).build();
-    CommandStatus cmdStatus2 = CommandStatus.newBuilder()
-        .setCmdId(id2)
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand).build();
-    // Fire events to the watcher
-    testLogger.clearOutput();
-    watcherLogger.clearOutput();
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-    Thread.sleep(10L);
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS,
-        new CloseContainerStatus(cmdStatus1));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_STATUS,
-        new CloseContainerStatus(cmdStatus2));
-
-    Thread.sleep(1000*3L);
-    // validation
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id1 + " executed"));
-    assertTrue(watcherLogger.getOutput().contains("CloseContainerCommand "
-        + "for containerId: " + id2 + " executed"));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-  }
-
-  private void setupWatcher(long time) {
-    leaseManager = new LeaseManager<>("TestCloseContainerWatcher#LeaseManager",
-        time);
-    leaseManager.start();
-    watcher = new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerMapping);
-    queue = new EventQueue();
-    watcher.start(queue);
-  }
-
-  /*
-   * This test fires two retryable closeContainer events. Both will time out.
-   * The first event's container will be open at the time of handling, so it
-   * should be sent back to the appropriate handler. The second event's
-   * container will be closed, so it should not be retried.
-   * */
-  @Test
-  public void testWatcherRetryableTimeoutHandling() throws InterruptedException,
-      IOException {
-
-    long id1 = HddsIdFactory.getLongId();
-    long id2 = HddsIdFactory.getLongId();
-    setupWatcher(1000L);
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, this);
-    setupMock(id1, id2, false);
-    GenericTestUtils.LogCapturer testLogger = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-    testLogger.clearOutput();
-
-    // Fire events to the watcher
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id1)));
-    queue.fireEvent(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-        new CloseContainerRetryableReq(ContainerID.valueof(id2)));
-
-    Thread.sleep(1000L + 10);
-
-    // validation
-    assertTrue(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id1));
-    assertFalse(testLogger.getOutput().contains("Handling "
-        + "closeContainerEvent for containerId: id=" + id2));
-  }
-
-
-  private void setupMock(long id1, long id2, boolean isOpen)
-      throws IOException {
-    ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class);
-    ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class);
-    when(containerMapping.getContainer(id1)).thenReturn(containerInfo);
-    when(containerMapping.getContainer(id2)).thenReturn(containerInfo2);
-    when(containerInfo.isContainerOpen()).thenReturn(true);
-    when(containerInfo2.isContainerOpen()).thenReturn(isOpen);
-  }
-
-  @Override
-  public void onMessage(ContainerID containerID, EventPublisher publisher) {
-    LOG.info("Handling closeContainerEvent for containerId: {}", containerID);
-  }
-}
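
For reference, CloseContainerWatcher correlates each retryable close request with a later status report by command id: a terminal status (EXECUTED or FAILED in the tests above) completes the watch, while PENDING leaves the request under its lease so it can time out and be retried. A minimal, self-contained sketch of that correlation, using hypothetical names and no HDDS dependencies (a simplified illustration, not the project's implementation):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class WatcherCorrelationSketch {
  // Pending close requests keyed by command id.
  private final Map<Long, String> pending = new ConcurrentHashMap<>();

  void onRetryableRequest(long cmdId, String containerId) {
    pending.put(cmdId, containerId);
  }

  // A terminal status (EXECUTED or FAILED) completes the watch; PENDING
  // leaves the request in place so the lease can still time out and retry.
  void onStatus(long cmdId, String status) {
    if ("PENDING".equals(status)) {
      return;
    }
    String containerId = pending.remove(cmdId);
    if (containerId != null) {
      System.out.println("close command for container " + containerId
          + " finished with status " + status);
    }
  }

  public static void main(String[] args) {
    WatcherCorrelationSketch watcher = new WatcherCorrelationSketch();
    watcher.onRetryableRequest(1L, "c1");
    watcher.onStatus(1L, "EXECUTED"); // completes the watch for c1
    watcher.onRetryableRequest(2L, "c2");
    watcher.onStatus(2L, "PENDING");  // c2 stays pending until its lease expires
  }
}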

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
deleted file mode 100644
index 390746f..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ /dev/null
@@ -1,530 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.VersionInfo;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .HeartbeatEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .RegisterEndpointTask;
-import org.apache.hadoop.ozone.container.common.states.endpoint
-    .VersionEndpointTask;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.mockito.Mockito.mock;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
-    .createEndpoint;
-import static org.hamcrest.Matchers.lessThanOrEqualTo;
-import static org.mockito.Mockito.when;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Tests the endpoints.
- */
-public class TestEndPoint {
-  private static InetSocketAddress serverAddress;
-  private static RPC.Server scmServer;
-  private static ScmTestMock scmServerImpl;
-  private static File testDir;
-  private static Configuration config;
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (scmServer != null) {
-      scmServer.stop();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    serverAddress = SCMTestUtils.getReuseableAddress();
-    scmServerImpl = new ScmTestMock();
-    scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
-        scmServerImpl, serverAddress, 10);
-    testDir = PathUtils.getTestDir(TestEndPoint.class);
-    config = SCMTestUtils.getConf();
-    config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
-    config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    config
-        .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
-  }
-
-  @Test
-  /**
-   * This test asserts that we are able to make a version call to the SCM
-   * server and get back the expected values.
-   */
-  public void testGetVersion() throws Exception {
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .getVersion(null);
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(VersionInfo.DESCRIPTION_KEY,
-          responseProto.getKeys(0).getKey());
-      Assert.assertEquals(VersionInfo.getLatestVersion().getDescription(),
-          responseProto.getKeys(0).getValue());
-    }
-  }
-
-  @Test
-  /**
-   * We make the getVersion RPC call, but via the VersionEndpointTask, which is
-   * how the state machine would make the call.
-   */
-  public void testGetVersionTask() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, 1000)) {
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          TestUtils.randomDatanodeDetails(), conf, null);
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // If the version call worked, the endpoint should automatically move
-      // to the next state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          newState);
-
-      // Now rpcEndpoint should remember the version it got from SCM
-      Assert.assertNotNull(rpcEndPoint.getVersion());
-    }
-  }
-
-  @Test
-  public void testCheckVersionResponse() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, 1000)) {
-      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-          .captureLogs(VersionEndpointTask.LOG);
-      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils
-          .randomDatanodeDetails(), conf, null);
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // If the version call worked, the endpoint should automatically move
-      // to the next state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          newState);
-
-      // Now rpcEndpoint should remember the version it got from SCM
-      Assert.assertNotNull(rpcEndPoint.getVersion());
-
-      // Now change the server scmId so that the datanode scmId will be
-      // different from the scmId in the SCM server response.
-      String newScmId = UUID.randomUUID().toString();
-      scmServerImpl.setScmId(newScmId);
-      newState = versionTask.call();
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
-            newState);
-      List<HddsVolume> volumesList = ozoneContainer.getVolumeSet()
-          .getFailedVolumesList();
-      Assert.assertTrue(volumesList.size() == 1);
-      File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(),
-          scmServerImpl.getScmId());
-      Assert.assertTrue(logCapturer.getOutput().contains("expected scm " +
-          "directory " + expectedScmDir.getAbsolutePath() + " does not " +
-          "exist"));
-      Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size()
-          == 0);
-      Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList()
-          .size() == 1);
-
-    }
-  }
-
-
-
-  @Test
-  /**
-   * This test makes a call to an endpoint where there is no SCM server. We
-   * expect the versionTask to be able to handle it.
-   */
-  public void testGetVersionToInvalidEndpoint() throws Exception {
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-    InetSocketAddress nonExistentServerAddress = SCMTestUtils
-        .getReuseableAddress();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        nonExistentServerAddress, 1000)) {
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          TestUtils.randomDatanodeDetails(), conf, null);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-
-      // This version call did NOT work, so the endpoint should remain in
-      // the same state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-          newState);
-    }
-  }
-
-  @Test
-  /**
-   * This test makes a getVersion RPC call, but the DummyStorageServer is
-   * going to respond a little slowly. We will assert that we are still in the
-   * GETVERSION state after the timeout.
-   */
-  public void testGetVersionAssertRpcTimeOut() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 100;
-    OzoneConfiguration conf = SCMTestUtils.getConf();
-
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
-        serverAddress, (int) rpcTimeout)) {
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(
-          TestUtils.randomDatanodeDetails(), conf, null);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf, ozoneContainer);
-
-      scmServerImpl.setRpcResponseDelay(1500);
-      long start = Time.monotonicNow();
-      EndpointStateMachine.EndPointStates newState = versionTask.call();
-      long end = Time.monotonicNow();
-      scmServerImpl.setRpcResponseDelay(0);
-      Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance));
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-          newState);
-    }
-  }
-
-  @Test
-  public void testRegister() throws Exception {
-    DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint = createEndpoint(
-        SCMTestUtils.getConf(), serverAddress, 1000)) {
-      SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage(), TestUtils
-                  .createNodeReport(
-                      getStorageReports(nodeToRegister.getUuid())),
-              TestUtils.getRandomContainerReports(10),
-                  TestUtils.getRandomPipelineReports());
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(nodeToRegister.getUuidString(),
-          responseProto.getDatanodeUUID());
-      Assert.assertNotNull(responseProto.getClusterID());
-      Assert.assertEquals(10, scmServerImpl.
-          getContainerCountsForDatanode(nodeToRegister));
-      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister));
-    }
-  }
-
-  private StorageReportProto getStorageReports(UUID id) {
-    String storagePath = testDir.getAbsolutePath() + "/" + id;
-    return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null);
-  }
-
-  private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
-      int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-    EndpointStateMachine rpcEndPoint =
-        createEndpoint(conf,
-            scmAddress, rpcTimeout);
-    rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
-    OzoneContainer ozoneContainer = mock(OzoneContainer.class);
-    when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
-        .createNodeReport(getStorageReports(UUID.randomUUID())));
-    when(ozoneContainer.getContainerReport()).thenReturn(
-        TestUtils.getRandomContainerReports(10));
-    when(ozoneContainer.getPipelineReport()).thenReturn(
-            TestUtils.getRandomPipelineReports());
-    RegisterEndpointTask endpointTask =
-        new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
-            mock(StateContext.class));
-    if (!clearDatanodeDetails) {
-      DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-      endpointTask.setDatanodeDetails(datanodeDetails);
-    }
-    endpointTask.call();
-    return rpcEndPoint;
-  }
-
-  @Test
-  public void testRegisterTask() throws Exception {
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(serverAddress, 1000, false)) {
-      // Successful register should move us to Heartbeat state.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterToInvalidEndpoint() throws Exception {
-    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(address, 1000, false)) {
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterNoContainerID() throws Exception {
-    InetSocketAddress address = SCMTestUtils.getReuseableAddress();
-    try (EndpointStateMachine rpcEndpoint =
-             registerTaskHelper(address, 1000, true)) {
-      // No Container ID, therefore we tell the datanode that we would like to
-      // shut down.
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
-          rpcEndpoint.getState());
-    }
-  }
-
-  @Test
-  public void testRegisterRpcTimeout() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 200;
-    scmServerImpl.setRpcResponseDelay(1500);
-    long start = Time.monotonicNow();
-    registerTaskHelper(serverAddress, 1000, false).close();
-    long end = Time.monotonicNow();
-    scmServerImpl.setRpcResponseDelay(0);
-    Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance));
-  }
-
-  @Test
-  public void testHeartbeat() throws Exception {
-    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(SCMTestUtils.getConf(),
-                 serverAddress, 1000)) {
-      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(dataNode.getProtoBufMessage())
-          .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(UUID.randomUUID())))
-          .build();
-
-      SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(request);
-      Assert.assertNotNull(responseProto);
-      Assert.assertEquals(0, responseProto.getCommandsCount());
-    }
-  }
-
-  @Test
-  public void testHeartbeatWithCommandStatusReport() throws Exception {
-    DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-        createEndpoint(SCMTestUtils.getConf(),
-            serverAddress, 1000)) {
-      // Add some scmCommands for heartbeat response
-      addScmCommands();
-
-
-      SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(dataNode.getProtoBufMessage())
-          .setNodeReport(TestUtils.createNodeReport(
-              getStorageReports(UUID.randomUUID())))
-          .build();
-
-      SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(request);
-      assertNotNull(responseProto);
-      assertEquals(3, responseProto.getCommandsCount());
-      assertEquals(0, scmServerImpl.getCommandStatusReportCount());
-
-      // Send heartbeat again from heartbeat endpoint task
-      final StateContext stateContext = heartbeatTaskHelper(
-          serverAddress, 3000);
-      Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
-      assertNotNull(map);
-      assertEquals("Should have 3 objects", 3, map.size());
-      assertTrue(map.containsKey(Long.valueOf(1)));
-      assertTrue(map.containsKey(Long.valueOf(2)));
-      assertTrue(map.containsKey(Long.valueOf(3)));
-      assertTrue(map.get(Long.valueOf(1)).getType()
-          .equals(Type.closeContainerCommand));
-      assertTrue(map.get(Long.valueOf(2)).getType()
-          .equals(Type.replicateContainerCommand));
-      assertTrue(
-          map.get(Long.valueOf(3)).getType().equals(Type.deleteBlocksCommand));
-      assertTrue(map.get(Long.valueOf(1)).getStatus().equals(Status.PENDING));
-      assertTrue(map.get(Long.valueOf(2)).getStatus().equals(Status.PENDING));
-      assertTrue(map.get(Long.valueOf(3)).getStatus().equals(Status.PENDING));
-
-      scmServerImpl.clearScmCommandRequests();
-    }
-  }
-
-  private void addScmCommands() {
-    SCMCommandProto closeCommand = SCMCommandProto.newBuilder()
-        .setCloseContainerCommandProto(
-            CloseContainerCommandProto.newBuilder().setCmdId(1)
-        .setContainerID(1)
-        .setReplicationType(ReplicationType.RATIS)
-        .setPipelineID(PipelineID.randomId().getProtobuf())
-        .build())
-        .setCommandType(Type.closeContainerCommand)
-        .build();
-    SCMCommandProto replicationCommand = SCMCommandProto.newBuilder()
-        .setReplicateContainerCommandProto(
-            ReplicateContainerCommandProto.newBuilder()
-        .setCmdId(2)
-        .setContainerID(2)
-        .build())
-        .setCommandType(Type.replicateContainerCommand)
-        .build();
-    SCMCommandProto deleteBlockCommand = SCMCommandProto.newBuilder()
-        .setDeleteBlocksCommandProto(
-            DeleteBlocksCommandProto.newBuilder()
-                .setCmdId(3)
-                .addDeletedBlocksTransactions(
-                    DeletedBlocksTransaction.newBuilder()
-                        .setContainerID(45)
-                        .setCount(1)
-                        .setTxID(23)
-                        .build())
-                .build())
-        .setCommandType(Type.deleteBlocksCommand)
-        .build();
-    scmServerImpl.addScmCommandRequest(closeCommand);
-    scmServerImpl.addScmCommandRequest(deleteBlockCommand);
-    scmServerImpl.addScmCommandRequest(replicationCommand);
-  }
-
-  private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress,
-      int rpcTimeout) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-    conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
-    conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    // The mini Ozone cluster will not come up if this flag is not set to
-    // true, since Ratis will exit if the server port cannot be bound. We can
-    // remove this hard-coding once we fix the Ratis default behaviour.
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-
-
-    // Create a datanode state machine for the stateContext used by the endpoint task
-    try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
-        TestUtils.randomDatanodeDetails(), conf);
-         EndpointStateMachine rpcEndPoint =
-            createEndpoint(conf, scmAddress, rpcTimeout)) {
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto =
-          TestUtils.randomDatanodeDetails().getProtoBufMessage();
-      rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT);
-
-      final StateContext stateContext =
-          new StateContext(conf, DatanodeStateMachine.DatanodeStates.RUNNING,
-              stateMachine);
-
-      HeartbeatEndpointTask endpointTask =
-          new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext);
-      endpointTask.setDatanodeDetailsProto(datanodeDetailsProto);
-      endpointTask.call();
-      Assert.assertNotNull(endpointTask.getDatanodeDetailsProto());
-
-      Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
-          rpcEndPoint.getState());
-      return stateContext;
-    }
-  }
-
-  @Test
-  public void testHeartbeatTask() throws Exception {
-    heartbeatTaskHelper(serverAddress, 1000);
-  }
-
-  @Test
-  public void testHeartbeatTaskToInvalidNode() throws Exception {
-    InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress();
-    heartbeatTaskHelper(invalidAddress, 1000);
-  }
-
-  @Test
-  public void testHeartbeatTaskRpcTimeOut() throws Exception {
-    final long rpcTimeout = 1000;
-    final long tolerance = 200;
-    scmServerImpl.setRpcResponseDelay(1500);
-    long start = Time.monotonicNow();
-    InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress();
-    heartbeatTaskHelper(invalidAddress, 1000);
-    long end = Time.monotonicNow();
-    scmServerImpl.setRpcResponseDelay(0);
-    Assert.assertThat(end - start,
-        lessThanOrEqualTo(rpcTimeout + tolerance));
-  }
-
-}
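
For reference, the endpoint tests above walk a small state machine: GETVERSION moves to REGISTER on a successful version call, to SHUTDOWN on an scmId mismatch, and stays put on RPC failure; REGISTER moves to HEARTBEAT on success and to SHUTDOWN when no datanode details are present. A compact sketch of those transitions (the state names match EndPointStates above, but the transition logic here is a simplified illustration, not the real state machine):

public final class EndpointStatesSketch {

  enum State { GETVERSION, REGISTER, HEARTBEAT, SHUTDOWN }

  // After getVersion: a failed RPC keeps the endpoint in GETVERSION; a
  // mismatched scmId shuts the datanode down; otherwise move to REGISTER.
  static State afterVersionCall(boolean rpcSucceeded, boolean scmIdMatches) {
    if (!rpcSucceeded) {
      return State.GETVERSION;
    }
    return scmIdMatches ? State.REGISTER : State.SHUTDOWN;
  }

  // After register: without datanode details there is nothing to register,
  // so shut down; a failed RPC stays in REGISTER; success moves to HEARTBEAT.
  static State afterRegister(boolean rpcSucceeded, boolean hasDatanodeDetails) {
    if (!hasDatanodeDetails) {
      return State.SHUTDOWN;
    }
    return rpcSucceeded ? State.HEARTBEAT : State.REGISTER;
  }

  public static void main(String[] args) {
    System.out.println(afterVersionCall(true, true));   // REGISTER
    System.out.println(afterVersionCall(false, true));  // GETVERSION
    System.out.println(afterRegister(true, true));      // HEARTBEAT
    System.out.println(afterRegister(true, false));     // SHUTDOWN
  }
}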

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
deleted file mode 100644
index da2ae84..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
deleted file mode 100644
index 1c80880..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.placement;
-
-import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Asserts that allocation strategy works as expected.
- */
-public class TestContainerPlacement {
-
-  private DescriptiveStatistics computeStatistics(NodeManager nodeManager) {
-    DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics();
-    for (DatanodeDetails dd : nodeManager.getNodes(HEALTHY)) {
-      float weightedValue =
-          nodeManager.getNodeStat(dd).get().getScmUsed().get() / (float)
-              nodeManager.getNodeStat(dd).get().getCapacity().get();
-      descriptiveStatistics.addValue(weightedValue);
-    }
-    return descriptiveStatistics;
-  }
-
-  /**
-   * This test simulates lots of Cluster I/O and updates the metadata in SCM.
-   * We simulate adding and removing containers from the cluster. It asserts
-   * that our placement algorithm has taken the capacity of nodes into
-   * consideration by asserting that the standard deviation of used space
-   * across the nodes has improved.
-   */
-  @Test
-  public void testCapacityPlacementYieldsBetterDataDistribution() throws
-      SCMException {
-    final int opsCount = 200 * 1000;
-    final int nodesRequired = 3;
-    Random random = new Random();
-
-    // The nature of init code in MockNodeManager yields similar clusters.
-    MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100);
-    MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100);
-    DescriptiveStatistics beforeCapacity =
-        computeStatistics(nodeManagerCapacity);
-    DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom);
-
-    // Assert that the initial layouts of the two clusters are similar.
-    assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom
-        .getStandardDeviation(), 0.001);
-
-    SCMContainerPlacementCapacity capacityPlacer = new
-        SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration());
-    SCMContainerPlacementRandom randomPlacer = new
-        SCMContainerPlacementRandom(nodeManagerRandom, new Configuration());
-
-    for (int x = 0; x < opsCount; x++) {
-      long containerSize = random.nextInt(100) * OzoneConsts.GB;
-      List<DatanodeDetails> nodesCapacity =
-          capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired,
-              containerSize);
-      assertEquals(nodesRequired, nodesCapacity.size());
-
-      List<DatanodeDetails> nodesRandom =
-          randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired,
-              containerSize);
-
-      // One fifth of all calls are deletes
-      if (x % 5 == 0) {
-        deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize);
-        deleteContainer(nodeManagerRandom, nodesRandom, containerSize);
-      } else {
-        createContainer(nodeManagerCapacity, nodesCapacity, containerSize);
-        createContainer(nodeManagerRandom, nodesRandom, containerSize);
-      }
-    }
-    DescriptiveStatistics postCapacity = computeStatistics(nodeManagerCapacity);
-    DescriptiveStatistics postRandom = computeStatistics(nodeManagerRandom);
-
-    // This is a very bold claim, and needs a large number of I/O operations.
-    // The claim in this assertion is that we improved the data distribution
-    // of this cluster in relation to the start state of the cluster.
-    Assert.assertTrue(beforeCapacity.getStandardDeviation() >
-        postCapacity.getStandardDeviation());
-
-    // This asserts that capacity placement yields a better data
-    // distribution than random placement, since both clusters started in an
-    // identical state.
-
-    Assert.assertTrue(postRandom.getStandardDeviation() >
-        postCapacity.getStandardDeviation());
-  }
-
-  private void deleteContainer(MockNodeManager nodeManager,
-      List<DatanodeDetails> nodes, long containerSize) {
-    for (DatanodeDetails dd : nodes) {
-      nodeManager.delContainer(dd, containerSize);
-    }
-  }
-
-  private void createContainer(MockNodeManager nodeManager,
-      List<DatanodeDetails> nodes, long containerSize) {
-    for (DatanodeDetails dd : nodes) {
-      nodeManager.addContainer(dd, containerSize);
-    }
-  }
-}
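
For reference, the comparison above hinges on one statistic: the standard deviation of per-node utilization (scmUsed / capacity), which shrinks as placement spreads load more evenly. A tiny illustration with commons-math3's DescriptiveStatistics, the same class the test uses; the sample utilizations are made-up values:

import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

public final class PlacementStddevSketch {
  public static void main(String[] args) {
    DescriptiveStatistics skewed = new DescriptiveStatistics();
    DescriptiveStatistics balanced = new DescriptiveStatistics();
    double[] skewedUse = {0.90, 0.10, 0.20};    // one hot node
    double[] balancedUse = {0.40, 0.40, 0.40};  // evenly spread load
    for (double u : skewedUse) {
      skewed.addValue(u);
    }
    for (double u : balancedUse) {
      balanced.addValue(u);
    }
    // The deleted test asserts the analogous inequality over real node stats.
    System.out.printf("skewed stddev=%.3f, balanced stddev=%.3f%n",
        skewed.getStandardDeviation(), balanced.getStandardDeviation());
  }
}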

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
deleted file mode 100644
index 7150d1b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.placement;
-
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests for the metrics that support placement.
- */
-public class TestDatanodeMetrics {
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-  @Test
-  public void testSCMNodeMetric() {
-    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals((long) stat.getCapacity().get(), 100L);
-    assertEquals((long) stat.getScmUsed().get(), 10L);
-    assertEquals((long) stat.getRemaining().get(), 90L);
-    SCMNodeMetric metric = new SCMNodeMetric(stat);
-
-    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals((long) newStat.getCapacity().get(), 100L);
-    assertEquals((long) newStat.getScmUsed().get(), 10L);
-    assertEquals((long) newStat.getRemaining().get(), 90L);
-
-    SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
-    assertTrue(metric.isEqual(newMetric.get()));
-
-    newMetric.add(stat);
-    assertTrue(newMetric.isGreater(metric.get()));
-
-    SCMNodeMetric zeroMetric = new SCMNodeMetric(new SCMNodeStat());
-    // Assert we can handle zero capacity.
-    assertTrue(metric.isGreater(zeroMetric.get()));
-
-  }
-}
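
For reference, the stats in this test keep capacity = scmUsed + remaining (100 = 10 + 90), and the placement test above weighs nodes by the scmUsed / capacity ratio. A trivial, dependency-free check of that bookkeeping:

public final class NodeStatBalanceSketch {
  public static void main(String[] args) {
    long capacity = 100L;
    long used = 10L;
    long remaining = 90L;
    // The invariant implicit in SCMNodeStat(100L, 10L, 90L) above.
    if (used + remaining != capacity) {
      throw new AssertionError("capacity should equal used + remaining");
    }
    // The per-node utilization the capacity placer weighs: 0.10 here.
    double utilization = used / (double) capacity;
    System.out.println("utilization = " + utilization);
  }
}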

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
deleted file mode 100644
index ddd751c..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.ozone.container.placement;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 318c54d..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-// Test classes for replication.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
deleted file mode 100644
index 74c3932..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.CommandQueue;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-
-/**
- * A Node Manager to test replication.
- */
-public class ReplicationNodeManagerMock implements NodeManager {
-  private final Map<DatanodeDetails, NodeState> nodeStateMap;
-  private final CommandQueue commandQueue;
-
-  /**
-   * Creates a mock node manager from a map of datanodes to their current
-   * states.
-   * @param nodeState A node state map.
-   * @param commandQueue Queue that receives commands for datanodes.
-   */
-  public ReplicationNodeManagerMock(Map<DatanodeDetails, NodeState> nodeState,
-                                    CommandQueue commandQueue) {
-    Preconditions.checkNotNull(nodeState);
-    this.nodeStateMap = nodeState;
-    this.commandQueue = commandQueue;
-  }
-
-  /**
-   * Get the minimum number of nodes to get out of chill mode.
-   *
-   * @return int
-   */
-  @Override
-  public int getMinimumChillModeNodes() {
-    return 0;
-  }
-
-  /**
-   * Returns a chill mode status string.
-   *
-   * @return String
-   */
-  @Override
-  public String getChillModeStatus() {
-    return null;
-  }
-
-  /**
-   * Get the number of data nodes in each state.
-   *
-   * @return A mapping from node state to the number of nodes in that state.
-   */
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    return null;
-  }
-
-  /**
-   * Removes a data node from the management of this Node Manager.
-   *
-   * @param node - DataNode.
-   * @throws NodeNotFoundException
-   */
-  @Override
-  public void removeNode(DatanodeDetails node)
-      throws NodeNotFoundException {
-    nodeStateMap.remove(node);
-
-  }
-
-  /**
-   * Gets all live datanodes that are currently communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return List of datanodes that are heartbeating SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getNodes(NodeState nodestate) {
-    return null;
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return int -- count
-   */
-  @Override
-  public int getNodeCount(NodeState nodestate) {
-    return 0;
-  }
-
-  /**
-   * Get all datanodes known to SCM.
-   *
-   * @return List of DatanodeDetails known to SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getAllNodes() {
-    return null;
-  }
-
-  /**
-   * Chill mode is the period when node manager waits for a minimum
-   * configured number of datanodes to report in. This is called chill mode
-   * to indicate the period before node manager gets into action.
-   * <p>
-   * Forcefully exits the chill mode, even if the minimum number of
-   * datanodes have not reported in.
-   */
-  @Override
-  public void forceExitChillMode() {
-
-  }
-
-  /**
-   * Puts the node manager into manual chill mode.
-   */
-  @Override
-  public void enterChillMode() {
-
-  }
-
-  /**
-   * Brings node manager out of manual chill mode.
-   */
-  @Override
-  public void exitChillMode() {
-
-  }
-
-  /**
-   * Returns true if node manager is out of chill mode, else false.
-   * @return true if out of chill mode, else false
-   */
-  @Override
-  public boolean isOutOfChillMode() {
-    return !nodeStateMap.isEmpty();
-  }
-
-  /**
-   * Returns the aggregated node stats.
-   *
-   * @return the aggregated node stats.
-   */
-  @Override
-  public SCMNodeStat getStats() {
-    return null;
-  }
-
-  /**
-   * Return a map of node stats.
-   *
-   * @return a map of individual node stats (live/stale but not dead).
-   */
-  @Override
-  public Map<UUID, SCMNodeStat> getNodeStats() {
-    return null;
-  }
-
-  /**
-   * Return the node stat of the specified datanode.
-   *
-   * @param dd - datanode details.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  @Override
-  public SCMNodeMetric getNodeStat(DatanodeDetails dd) {
-    return null;
-  }
-
-
-  /**
-   * Returns the node state of a specific node.
-   *
-   * @param dd - DatanodeDetails
-   * @return Healthy/Stale/Dead.
-   */
-  @Override
-  public NodeState getNodeState(DatanodeDetails dd) {
-    return nodeStateMap.get(dd);
-  }
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param dnId - datanodeID
-   * @return Set of PipelineID
-   */
-  @Override
-  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  @Override
-  public void addPipeline(Pipeline pipeline) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  @Override
-  public void removePipeline(Pipeline pipeline) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws SCMException - if datanode is not known. For new datanode use
-   *                        addDatanodeInContainerMap call.
-   */
-  @Override
-  public void setContainersForDatanode(UUID uuid, Set<ContainerID> containerIds)
-      throws SCMException {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Process containerReport received from datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  @Override
-  public ReportResult<ContainerID> processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  @Override
-  public Set<ContainerID> getContainers(UUID uuid) {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Insert a new datanode with the set of containerIDs for the containers
-   * available on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  @Override
-  public void addDatanodeInContainerMap(UUID uuid,
-      Set<ContainerID> containerIDs) throws SCMException {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Gets the version info from SCM.
-   *
-   * @param versionRequest - version request.
-   * @return SCM version info and other information needed by the datanode.
-   */
-  @Override
-  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
-    return null;
-  }
-
-  /**
-   * Register the node if the node finds that it is not registered with any SCM.
-   *
-   * @param dd DatanodeDetails
-   * @param nodeReport NodeReportProto
-   * @param pipelineReportsProto PipelineReportsProto
-   * @return RegisteredCommand
-   */
-  @Override
-  public RegisteredCommand register(DatanodeDetails dd,
-                                    NodeReportProto nodeReport,
-                                    PipelineReportsProto pipelineReportsProto) {
-    return null;
-  }
-
-  /**
-   * Send heartbeat to indicate the datanode is alive and doing well.
-   *
-   * @param dd - Datanode Details.
-   * @return List of SCMCommands sent back in the heartbeat response.
-   */
-  @Override
-  public List<SCMCommand> processHeartbeat(DatanodeDetails dd) {
-    return null;
-  }
-
-  /**
-   * Clears all nodes from the node Manager.
-   */
-  public void clearMap() {
-    this.nodeStateMap.clear();
-  }
-
-  /**
-   * Adds a node to the existing Node manager. This is used only for test
-   * purposes.
-   * @param id DatanodeDetails
-   * @param state State you want to put that node to.
-   */
-  public void addNode(DatanodeDetails id, NodeState state) {
-    nodeStateMap.put(id, state);
-  }
-
-  @Override
-  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    this.commandQueue.addCommand(dnId, command);
-  }
-
-  /**
-   * Empty implementation for processNodeReport.
-   * @param dnUuid - datanode UUID.
-   * @param nodeReport - the datanode's node report.
-   */
-  @Override
-  public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
-    // do nothing.
-  }
-
-  @Override
-  public void onMessage(CommandForDatanode commandForDatanode,
-                        EventPublisher publisher) {
-    // do nothing.
-  }
-
-  /**
-   * Empty implementation for processDeadNode.
-   * @param dnUuid - UUID of the dead datanode.
-   */
-  @Override
-  public void processDeadNode(UUID dnUuid) {
-    // do nothing.
-  }
-}
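
A mock like the one above is typically driven directly from a test. The following is a hedged sketch of such usage; the no-arg CommandQueue constructor and the DatanodeDetails builder calls are assumptions, since neither class is part of this hunk:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.UUID;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
    import org.apache.hadoop.hdds.scm.node.CommandQueue;

    // Hedged sketch of exercising the deleted mock from a test.
    public final class ReplicationMockUsageSketch {
      public static void main(String[] args) {
        Map<DatanodeDetails, NodeState> states = new HashMap<>();
        ReplicationNodeManagerMock nodeManager =
            new ReplicationNodeManagerMock(states, new CommandQueue());

        // Builder usage is an assumption; DatanodeDetails is not in this hunk.
        DatanodeDetails dn = DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .build();
        nodeManager.addNode(dn, NodeState.HEALTHY);

        // Lookups go straight to the backing map supplied above.
        assert nodeManager.getNodeState(dn) == NodeState.HEALTHY;
        assert nodeManager.isOutOfChillMode(); // non-empty map
      }
    }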

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
deleted file mode 100644
index 4e8a90b..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
deleted file mode 100644
index 37c7d9d..0000000
--- a/hadoop-hdds/tools/pom.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>hadoop-hdds-tools</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Tools</description>
-  <name>Apache Hadoop HDDS Tools</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.8.7</version>
-    </dependency>
-
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
deleted file mode 100644
index 59cd0ba..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.cli.GenericCli;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.cli.container.CloseSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.CreateSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.DeleteSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.ListSubcommand;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.lang3.StringUtils;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-
-/**
- * This class is the CLI of SCM.
- */
-@Command(name = "ozone scmcli", hidden = true, description =
-    "Developer tools to handle SCM specific "
-        + "operations.",
-    versionProvider = HddsVersionProvider.class,
-    subcommands = {
-        ListSubcommand.class,
-        InfoSubcommand.class,
-        DeleteSubcommand.class,
-        CreateSubcommand.class,
-        CloseSubcommand.class
-    },
-    mixinStandardHelpOptions = true)
-public class SCMCLI extends GenericCli {
-
-  @Option(names = {"--scm"}, description = "The destination scm (host:port)")
-  private String scm = "";
-
-  /**
-   * Main for the scm shell Command handling.
-   *
-   * @param argv - command line arguments.
-   * @throws Exception
-   */
-  public static void main(String[] argv) throws Exception {
-
-    LogManager.resetConfiguration();
-    Logger.getRootLogger().setLevel(Level.INFO);
-    Logger.getRootLogger()
-        .addAppender(new ConsoleAppender(new PatternLayout("%m%n")));
-    Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR);
-
-    new SCMCLI().run(argv);
-  }
-
-  public ScmClient createScmClient()
-      throws IOException {
-
-    OzoneConfiguration ozoneConf = createOzoneConfiguration();
-    if (StringUtils.isNotEmpty(scm)) {
-      ozoneConf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm);
-    }
-    if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) {
-
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY
-              + " should be set in ozone-site.xml or with the --scm option");
-    }
-
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(ozoneConf);
-    int containerSizeGB = (int) ozoneConf.getStorageSize(
-        OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.GB);
-    ContainerOperationClient
-        .setContainerSizeB(containerSizeGB * OzoneConsts.GB);
-
-    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    StorageContainerLocationProtocolClientSideTranslatorPB client =
-        new StorageContainerLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-                scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf,
-                NetUtils.getDefaultSocketFactory(ozoneConf),
-                Client.getRpcTimeout(ozoneConf)));
-    return new ContainerOperationClient(
-        client, new XceiverClientManager(ozoneConf));
-  }
-
-  public void checkContainerExists(ScmClient scmClient, long containerId)
-      throws IOException {
-    ContainerInfo container = scmClient.getContainer(containerId);
-    if (container == null) {
-      throw new IllegalArgumentException("No such container " + containerId);
-    }
-  }
-
-}
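
GenericCli and its run() method are not part of this hunk. As a rough approximation under that caveat, a picocli entry point shaped like SCMCLI dispatches to its registered subcommands along these lines (CommandLine.execute() is the stock picocli API and stands in for whatever GenericCli.run() did here):

    import picocli.CommandLine;

    // Approximation of the dispatch behind GenericCli.run(): parse argv,
    // select the matching subcommand (list/info/delete/create/close above)
    // and invoke its call() method.
    public final class ScmCliLauncherSketch {
      public static void main(String[] args) {
        System.exit(new CommandLine(new SCMCLI()).execute(args));
      }
    }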

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
deleted file mode 100644
index 173d0ce..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * The handler of close container command.
- */
-@Command(
-    name = "close",
-    description = "close container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class CloseSubcommand implements Callable<Void> {
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  @Parameters(description = "Id of the container to close")
-  private long containerId;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
-      parent.checkContainerExists(scmClient, containerId);
-      scmClient.closeContainer(containerId);
-      return null;
-    }
-  }
-}
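
The container subcommands in this hunk all share one shape: obtain a client from the parent command, act on it, and let try-with-resources close it. A hedged template follows; the "exists" subcommand is invented for illustration and is not part of this commit:

    import java.util.concurrent.Callable;

    import org.apache.hadoop.hdds.scm.cli.SCMCLI;
    import org.apache.hadoop.hdds.scm.client.ScmClient;

    import picocli.CommandLine.Command;
    import picocli.CommandLine.Parameters;
    import picocli.CommandLine.ParentCommand;

    // Hypothetical subcommand showing the shared handler shape.
    @Command(name = "exists", description = "Check whether a container exists")
    public class ExistsSubcommandSketch implements Callable<Void> {

      @ParentCommand
      private SCMCLI parent;

      @Parameters(description = "Id of the container to check")
      private long containerId;

      @Override
      public Void call() throws Exception {
        try (ScmClient scmClient = parent.createScmClient()) {
          // Throws IllegalArgumentException if the container is unknown.
          parent.checkContainerExists(scmClient, containerId);
          System.out.println("Container " + containerId + " exists.");
          return null;
        }
      }
    }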

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
deleted file mode 100644
index 1dda9c4..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the container creation command.
- */
-@Command(
-    name = "create",
-    description = "Create container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class CreateSubcommand implements Callable<Void> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CreateSubcommand.class);
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  @Option(names = {"-o", "--owner"}, defaultValue = "OZONE",
-      required = false, description = "Owner of the new container")
-  private String owner;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
-      ContainerWithPipeline container = scmClient.createContainer(owner);
-      LOG.info("Container {} is created.",
-          container.getContainerInfo().getContainerID());
-      return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
deleted file mode 100644
index c163a3a..0000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.cli.container;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
-
-/**
- * This is the handler that processes the delete container command.
- */
-@Command(
-    name = "delete",
-    description = "Delete container",
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class DeleteSubcommand implements Callable<Void> {
-
-  @Parameters(description = "Id of the container to delete")
-  private long containerId;
-
-  @Option(names = {"-f",
-      "--force"}, description = "forcibly delete the container")
-  private boolean force;
-
-  @ParentCommand
-  private SCMCLI parent;
-
-  @Override
-  public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
-      parent.checkContainerExists(scmClient, containerId);
-      scmClient.deleteContainer(containerId, force);
-      return null;
-    }
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

