hadoop-common-commits mailing list archives

From aengin...@apache.org
Subject [28/51] [partial] hadoop git commit: HDFS-13258. Ozone: restructure Hdsl/Ozone code to separated maven subprojects. Contributed by Elek Marton, Mukul Kumar Singh, Xiaoyu Yao, Ajay Kumar, Anu Engineer, Lokesh Jain, Nanda Kumar.
Date Thu, 15 Mar 2018 16:32:43 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
deleted file mode 100644
index 95bf107..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-
-import org.apache.hadoop.ozone.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto;
-import org.apache.hadoop.scm.ScmConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-
-/**
- * Register the datanode with SCM.
- */
-public final class RegisterEndpointTask implements
-    Callable<EndpointStateMachine.EndPointStates> {
-  static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class);
-
-  private final EndpointStateMachine rpcEndPoint;
-  private final Configuration conf;
-  private Future<EndpointStateMachine.EndPointStates> result;
-  private ContainerNodeIDProto containerNodeIDProto;
-
-  /**
-   * Creates a register endpoint task.
-   *
-   * @param rpcEndPoint - endpoint
-   * @param conf - Configuration.
-   */
-  @VisibleForTesting
-  public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
-      Configuration conf) {
-    this.rpcEndPoint = rpcEndPoint;
-    this.conf = conf;
-  }
-
-  /**
-   * Get the ContainerNodeID Proto.
-   *
-   * @return ContainerNodeIDProto
-   */
-  public ContainerNodeIDProto getContainerNodeIDProto() {
-    return containerNodeIDProto;
-  }
-
-  /**
-   * Set the ContainerNodeID Proto.
-   *
-   * @param containerNodeIDProto - Container Node ID.
-   */
-  public void setContainerNodeIDProto(ContainerNodeIDProto
-      containerNodeIDProto) {
-    this.containerNodeIDProto = containerNodeIDProto;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public EndpointStateMachine.EndPointStates call() throws Exception {
-
-    if (getContainerNodeIDProto() == null) {
-      LOG.error("Container ID proto cannot be null in RegisterEndpoint task, " +
-          "shutting down the endpoint.");
-      return rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    }
-
-    rpcEndPoint.lock();
-    try {
-      DatanodeID dnNodeID = DatanodeID.getFromProtoBuf(
-          getContainerNodeIDProto().getDatanodeID());
-
-      // TODO: Add responses to the command queue.
-      rpcEndPoint.getEndPoint().register(dnNodeID,
-          conf.getStrings(ScmConfigKeys.OZONE_SCM_NAMES));
-      EndpointStateMachine.EndPointStates nextState =
-          rpcEndPoint.getState().getNextState();
-      rpcEndPoint.setState(nextState);
-      rpcEndPoint.zeroMissedCount();
-    } catch (IOException ex) {
-      rpcEndPoint.logIfNeeded(ex);
-    } finally {
-      rpcEndPoint.unlock();
-    }
-
-    return rpcEndPoint.getState();
-  }
-
-  /**
-   * Returns a builder class for RegisterEndPoint task.
-   *
-   * @return Builder.
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for RegisterEndPoint task.
-   */
-  public static class Builder {
-    private EndpointStateMachine endPointStateMachine;
-    private Configuration conf;
-    private ContainerNodeIDProto containerNodeIDProto;
-
-    /**
-     * Constructs the builder class.
-     */
-    public Builder() {
-    }
-
-    /**
-     * Sets the endpoint state machine.
-     *
-     * @param rpcEndPoint - Endpoint state machine.
-     * @return Builder
-     */
-    public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) {
-      this.endPointStateMachine = rpcEndPoint;
-      return this;
-    }
-
-    /**
-     * Sets the Config.
-     *
-     * @param config - config
-     * @return Builder.
-     */
-    public Builder setConfig(Configuration config) {
-      this.conf = config;
-      return this;
-    }
-
-    /**
-     * Sets the NodeID.
-     *
-     * @param nodeID - NodeID proto
-     * @return Builder
-     */
-    public Builder setNodeID(ContainerNodeIDProto nodeID) {
-      this.containerNodeIDProto = nodeID;
-      return this;
-    }
-
-    public RegisterEndpointTask build() {
-      if (endPointStateMachine == null) {
-        LOG.error("No endpoint specified.");
-        throw new IllegalArgumentException("A valid endpoint state machine is" +
-            " needed to construct RegisterEndPoint task");
-      }
-
-      if (conf == null) {
-        LOG.error("No config specified.");
-        throw new IllegalArgumentException("A valid configration is needed to" +
-            " construct RegisterEndpoint task");
-      }
-
-      if (containerNodeIDProto == null) {
-        LOG.error("No nodeID specified.");
-        throw new IllegalArgumentException("A vaild Node ID is needed to " +
-            "construct RegisterEndpoint task");
-      }
-
-      RegisterEndpointTask task = new RegisterEndpointTask(this
-          .endPointStateMachine, this.conf);
-      task.setContainerNodeIDProto(containerNodeIDProto);
-      return task;
-    }
-  }
-}
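
For orientation, the builder above is exercised roughly as follows. This is a
minimal usage sketch, not code from this patch; the endpoint state machine,
configuration, node ID proto, and executor are assumed to be supplied by the
datanode's surrounding state-machine code:

    // Hypothetical wiring; rpcEndPoint, conf, nodeIDProto and executor are
    // assumed to come from the datanode state-machine context.
    RegisterEndpointTask task = RegisterEndpointTask.newBuilder()
        .setEndpointStateMachine(rpcEndPoint)
        .setConfig(conf)
        .setNodeID(nodeIDProto)
        .build();
    // The task is a Callable<EndPointStates>, so it can be submitted to an
    // ExecutorService and the next endpoint state read from the Future.
    Future<EndpointStateMachine.EndPointStates> nextState =
        executor.submit(task);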

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
deleted file mode 100644
index fa59234..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-
-import java.io.IOException;
-import java.util.concurrent.Callable;
-
-/**
- * Task that fetches the version from SCM.
- */
-public class VersionEndpointTask implements
-    Callable<EndpointStateMachine.EndPointStates> {
-  private final EndpointStateMachine rpcEndPoint;
-  private final Configuration configuration;
-
-  public VersionEndpointTask(EndpointStateMachine rpcEndPoint,
-      Configuration conf) {
-    this.rpcEndPoint = rpcEndPoint;
-    this.configuration = conf;
-  }
-
-  /**
-   * Computes a result, or throws an exception if unable to do so.
-   *
-   * @return computed result
-   * @throws Exception if unable to compute a result
-   */
-  @Override
-  public EndpointStateMachine.EndPointStates call() throws Exception {
-    rpcEndPoint.lock();
-    try {
-      SCMVersionResponseProto versionResponse =
-          rpcEndPoint.getEndPoint().getVersion(null);
-      rpcEndPoint.setVersion(VersionResponse.getFromProtobuf(versionResponse));
-
-      EndpointStateMachine.EndPointStates nextState =
-          rpcEndPoint.getState().getNextState();
-      rpcEndPoint.setState(nextState);
-      rpcEndPoint.zeroMissedCount();
-    } catch (IOException ex) {
-      rpcEndPoint.logIfNeeded(ex);
-    } finally {
-      rpcEndPoint.unlock();
-    }
-    return rpcEndPoint.getState();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
deleted file mode 100644
index 1122598..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-/**
- This package contains code for RPC endpoint state transitions.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java
deleted file mode 100644
index 92c953f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.states;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
deleted file mode 100644
index 3a5ce18..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import com.google.common.base.Preconditions;
-import io.netty.bootstrap.ServerBootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioServerSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.SocketAddress;
-
-/**
- * Creates a netty server endpoint that acts as the communication layer for
- * Ozone containers.
- */
-public final class XceiverServer implements XceiverServerSpi {
-  private static final Logger
-      LOG = LoggerFactory.getLogger(XceiverServer.class);
-  private int port;
-  private final ContainerDispatcher storageContainer;
-
-  private EventLoopGroup bossGroup;
-  private EventLoopGroup workerGroup;
-  private Channel channel;
-
-  /**
-   * Constructs a netty server class.
-   *
-   * @param conf - Configuration
-   */
-  public XceiverServer(Configuration conf,
-                       ContainerDispatcher dispatcher) {
-    Preconditions.checkNotNull(conf);
-
-    this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    // Get an available port on the current node and
-    // use that as the container port.
-    if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
-      try (ServerSocket socket = new ServerSocket()) {
-        socket.setReuseAddress(true);
-        SocketAddress address = new InetSocketAddress(0);
-        socket.bind(address);
-        this.port = socket.getLocalPort();
-        LOG.info("Found a free port for the server : {}", this.port);
-      } catch (IOException e) {
-        LOG.error("Unable find a random free port for the server, "
-            + "fallback to use default port {}", this.port, e);
-      }
-    }
-    this.storageContainer = dispatcher;
-  }
-
-  @Override
-  public int getIPCPort() {
-    return this.port;
-  }
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   *
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  @Override
-  public OzoneProtos.ReplicationType getServerType() {
-    return OzoneProtos.ReplicationType.STAND_ALONE;
-  }
-
-  @Override
-  public void start() throws IOException {
-    bossGroup = new NioEventLoopGroup();
-    workerGroup = new NioEventLoopGroup();
-    channel = new ServerBootstrap()
-        .group(bossGroup, workerGroup)
-        .channel(NioServerSocketChannel.class)
-        .handler(new LoggingHandler(LogLevel.INFO))
-        .childHandler(new XceiverServerInitializer(storageContainer))
-        .bind(port)
-        .syncUninterruptibly()
-        .channel();
-  }
-
-  @Override
-  public void stop() {
-    if (storageContainer != null) {
-      storageContainer.shutdown();
-    }
-    if (bossGroup != null) {
-      bossGroup.shutdownGracefully();
-    }
-    if (workerGroup != null) {
-      workerGroup.shutdownGracefully();
-    }
-    if (channel != null) {
-      channel.close().awaitUninterruptibly();
-    }
-  }
-}
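
The random-port branch in the constructor above uses the standard
ephemeral-port idiom: binding a ServerSocket to port 0 asks the OS for any
free port. A self-contained sketch of that idiom (an illustrative helper, not
part of this patch; note the inherent race that the port may be taken between
closing the probe socket and the real bind, which is why the feature is only
meant for tests via DFS_CONTAINER_IPC_RANDOM_PORT):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    final class FreePortProbe {
      static int findFreePort() throws IOException {
        try (ServerSocket socket = new ServerSocket()) {
          socket.setReuseAddress(true);          // tolerate TIME_WAIT sockets
          socket.bind(new InetSocketAddress(0)); // port 0: OS picks a free port
          return socket.getLocalPort();
        }
      }
    }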

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
deleted file mode 100644
index c4a8f53..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import org.slf4j.LoggerFactory;
-import org.slf4j.Logger;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-
-/**
- * Netty server handlers that respond to Network events.
- */
-public class XceiverServerHandler extends
-    SimpleChannelInboundHandler<ContainerCommandRequestProto> {
-
-  static final Logger LOG = LoggerFactory.getLogger(XceiverServerHandler.class);
-  private final ContainerDispatcher dispatcher;
-
-  /**
-   * Constructor for server handler.
-   * @param dispatcher - Dispatcher interface
-   */
-  public XceiverServerHandler(ContainerDispatcher dispatcher) {
-    this.dispatcher = dispatcher;
-  }
-
-  /**
-   * <strong>Please keep in mind that this method will be renamed to {@code
-   * messageReceived(ChannelHandlerContext, I)} in 5.0.</strong>
-   * <p>
-   * Is called for each message of type {@link ContainerCommandRequestProto}.
-   *
-   * @param ctx the {@link ChannelHandlerContext} which this {@link
-   *            SimpleChannelInboundHandler} belongs to
-   * @param msg the message to handle
-   * @throws Exception is thrown if an error occurred
-   */
-  @Override
-  public void channelRead0(ChannelHandlerContext ctx,
-                           ContainerCommandRequestProto msg) throws
-      Exception {
-    ContainerCommandResponseProto response = this.dispatcher.dispatch(msg);
-    LOG.debug("Writing the reponse back to client.");
-    ctx.writeAndFlush(response);
-  }
-
-  /**
-   * Calls {@link ChannelHandlerContext#fireExceptionCaught(Throwable)}.
-   * Subclasses may override this method to change behavior.
-   *
-   * @param ctx   - Channel Handler Context
-   * @param cause - Exception
-   */
-  @Override
-  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
-      throws Exception {
-    LOG.error("An exception caught in the pipeline : " + cause.toString());
-    super.exceptionCaught(ctx, cause);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
deleted file mode 100644
index 4d32d86..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import com.google.common.base.Preconditions;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-
-/**
- * Creates a channel for the XceiverServer.
- */
-public class XceiverServerInitializer extends ChannelInitializer<SocketChannel> {
-  private final ContainerDispatcher dispatcher;
-  public XceiverServerInitializer(ContainerDispatcher dispatcher) {
-    Preconditions.checkNotNull(dispatcher);
-    this.dispatcher = dispatcher;
-  }
-
-  /**
-   * This method will be called once the Channel is registered. After
-   * the method returns, this instance will be removed from the {@link
-   * ChannelPipeline}.
-   *
-   * @param ch the {@link SocketChannel} which was registered.
-   * @throws Exception is thrown if an error occurs. In that case the channel
-   * will be closed.
-   */
-  @Override
-  protected void initChannel(SocketChannel ch) throws Exception {
-    ChannelPipeline pipeline = ch.pipeline();
-    pipeline.addLast(new ProtobufVarint32FrameDecoder());
-    pipeline.addLast(new ProtobufDecoder(ContainerCommandRequestProto
-        .getDefaultInstance()));
-    pipeline.addLast(new ProtobufVarint32LengthFieldPrepender());
-    pipeline.addLast(new ProtobufEncoder());
-    pipeline.addLast(new XceiverServerHandler(dispatcher));
-  }
-}
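
The pipeline above fixes the wire format: each ContainerCommandRequestProto
travels as a protobuf message prefixed with a varint32 length. A client
talking to this server needs the mirrored pipeline; a sketch, assuming
responses are ContainerCommandResponseProto messages framed the same way
(consistent with XceiverServerHandler above):

    // Hypothetical client-side mirror of the server pipeline.
    ChannelPipeline pipeline = ch.pipeline();
    pipeline.addLast(new ProtobufVarint32FrameDecoder());         // strip length prefix
    pipeline.addLast(new ProtobufDecoder(
        ContainerCommandResponseProto.getDefaultInstance()));     // decode responses
    pipeline.addLast(new ProtobufVarint32LengthFieldPrepender()); // prepend length
    pipeline.addLast(new ProtobufEncoder());                      // encode requests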

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
deleted file mode 100644
index b61d7fd..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-
-import java.io.IOException;
-
-/** A server endpoint that acts as the communication layer for Ozone
- * containers. */
-public interface XceiverServerSpi {
-  /** Starts the server. */
-  void start() throws IOException;
-
-  /** Stops a running server. */
-  void stop();
-
-  /** Get server IPC port. */
-  int getIPCPort();
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  OzoneProtos.ReplicationType getServerType();
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java
deleted file mode 100644
index 59c96f1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server;
-
-/**
- * This package contains classes for the server side of the storage
- * container protocol.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
deleted file mode 100644
index 569fb23..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientRequest;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.server.storage.RaftStorage;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
-import org.apache.ratis.shaded.proto.RaftProtos.LogEntryProto;
-import org.apache.ratis.shaded.proto.RaftProtos.SMLogEntryProto;
-import org.apache.ratis.statemachine.impl.BaseStateMachine;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.apache.ratis.statemachine.StateMachineStorage;
-import org.apache.ratis.statemachine.TransactionContext;
-import org.apache.ratis.statemachine.impl.TransactionContextImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.ConcurrentHashMap;
-
-/** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
- *
- * The stateMachine is responsible for handling different types of container
- * requests. The container requests can be divided into readonly and write
- * requests.
- *
- * Read-only requests are classified by
- * {@link org.apache.hadoop.scm.XceiverClientRatis#isReadOnly}
- * and are answered from {@link #query(Message)}.
- *
- * The write requests can be divided into requests with user data
- * (WriteChunkRequest) and requests without user data.
- *
- * In order to optimize write throughput, a writeChunk request is
- * processed in two phases. The split happens in
- * {@link #startTransaction(RaftClientRequest)}: in the first phase the
- * user data is written directly into the state machine via
- * {@link #writeStateMachineData}, and in the second phase the
- * transaction is committed via {@link #applyTransaction(TransactionContext)}.
- *
- * For requests with no state machine data, the transaction is committed
- * directly through {@link #applyTransaction(TransactionContext)}.
- *
- * Two orderings are currently enforced in the code:
- * 1) Write chunk operations are executed after the create container
- * operation; otherwise a write chunk operation would fail because the
- * container has not been created yet. The create container operation is
- * therefore split in {@link #startTransaction(RaftClientRequest)}, which
- * helps synchronize the calls in {@link #writeStateMachineData}.
- *
- * 2) The write chunk commit operation is executed after the write chunk
- * state machine operation, ensuring that the commit is synchronized with
- * the state machine operation.
- */
-public class ContainerStateMachine extends BaseStateMachine {
-  static final Logger LOG = LoggerFactory.getLogger(
-      ContainerStateMachine.class);
-  private final SimpleStateMachineStorage storage
-      = new SimpleStateMachineStorage();
-  private final ContainerDispatcher dispatcher;
-  private ThreadPoolExecutor writeChunkExecutor;
-  private final ConcurrentHashMap<Long, CompletableFuture<Message>>
-      writeChunkFutureMap;
-  private final ConcurrentHashMap<String, CompletableFuture<Message>>
-      createContainerFutureMap;
-
-  ContainerStateMachine(ContainerDispatcher dispatcher,
-      ThreadPoolExecutor writeChunkExecutor) {
-    this.dispatcher = dispatcher;
-    this.writeChunkExecutor = writeChunkExecutor;
-    this.writeChunkFutureMap = new ConcurrentHashMap<>();
-    this.createContainerFutureMap = new ConcurrentHashMap<>();
-  }
-
-  @Override
-  public StateMachineStorage getStateMachineStorage() {
-    return storage;
-  }
-
-  @Override
-  public void initialize(
-      RaftPeerId id, RaftProperties properties, RaftStorage raftStorage)
-      throws IOException {
-    super.initialize(id, properties, raftStorage);
-    storage.init(raftStorage);
-    //  TODO handle snapshots
-
-    // TODO: Add a flag that tells you that initialize has been called.
-    // Check with Ratis if this feature is done in Ratis.
-  }
-
-  @Override
-  public TransactionContext startTransaction(RaftClientRequest request)
-      throws IOException {
-    final ContainerCommandRequestProto proto =
-        getRequestProto(request.getMessage().getContent());
-
-    final SMLogEntryProto log;
-    if (proto.getCmdType() == ContainerProtos.Type.WriteChunk) {
-      final WriteChunkRequestProto write = proto.getWriteChunk();
-      // create the state machine data proto
-      final WriteChunkRequestProto dataWriteChunkProto =
-          WriteChunkRequestProto
-              .newBuilder(write)
-              .setStage(ContainerProtos.Stage.WRITE_DATA)
-              .build();
-      ContainerCommandRequestProto dataContainerCommandProto =
-          ContainerCommandRequestProto
-              .newBuilder(proto)
-              .setWriteChunk(dataWriteChunkProto)
-              .build();
-
-      // create the log entry proto
-      final WriteChunkRequestProto commitWriteChunkProto =
-          WriteChunkRequestProto.newBuilder()
-              .setPipeline(write.getPipeline())
-              .setKeyName(write.getKeyName())
-              .setChunkData(write.getChunkData())
-              // skipping the data field as it is
-              // already set in statemachine data proto
-              .setStage(ContainerProtos.Stage.COMMIT_DATA)
-              .build();
-      ContainerCommandRequestProto commitContainerCommandProto =
-          ContainerCommandRequestProto
-              .newBuilder(proto)
-              .setWriteChunk(commitWriteChunkProto)
-              .build();
-
-      log = SMLogEntryProto.newBuilder()
-          .setData(getShadedByteString(commitContainerCommandProto))
-          .setStateMachineData(getShadedByteString(dataContainerCommandProto))
-          .build();
-    } else if (proto.getCmdType() == ContainerProtos.Type.CreateContainer) {
-      log = SMLogEntryProto.newBuilder()
-          .setData(request.getMessage().getContent())
-          .setStateMachineData(request.getMessage().getContent())
-          .build();
-    } else {
-      log = SMLogEntryProto.newBuilder()
-          .setData(request.getMessage().getContent())
-          .build();
-    }
-    return new TransactionContextImpl(this, request, log);
-  }
-
-  private ByteString getShadedByteString(ContainerCommandRequestProto proto) {
-    return ShadedProtoUtil.asShadedByteString(proto.toByteArray());
-  }
-
-  private ContainerCommandRequestProto getRequestProto(ByteString request)
-      throws InvalidProtocolBufferException {
-    return ContainerCommandRequestProto.parseFrom(
-        ShadedProtoUtil.asByteString(request));
-  }
-
-  private Message runCommand(ContainerCommandRequestProto requestProto) {
-    LOG.trace("dispatch {}", requestProto);
-    ContainerCommandResponseProto response = dispatcher.dispatch(requestProto);
-    LOG.trace("response {}", response);
-    return () -> ShadedProtoUtil.asShadedByteString(response.toByteArray());
-  }
-
-  private CompletableFuture<Message> handleWriteChunk(
-      ContainerCommandRequestProto requestProto, long entryIndex) {
-    final WriteChunkRequestProto write = requestProto.getWriteChunk();
-    String containerName = write.getPipeline().getContainerName();
-    CompletableFuture<Message> future =
-        createContainerFutureMap.get(containerName);
-    CompletableFuture<Message> writeChunkFuture;
-    if (future != null) {
-      writeChunkFuture = future.thenApplyAsync(
-          v -> runCommand(requestProto), writeChunkExecutor);
-    } else {
-      writeChunkFuture = CompletableFuture.supplyAsync(
-          () -> runCommand(requestProto), writeChunkExecutor);
-    }
-    writeChunkFutureMap.put(entryIndex, writeChunkFuture);
-    return writeChunkFuture;
-  }
-
-  private CompletableFuture<Message> handleCreateContainer(
-      ContainerCommandRequestProto requestProto) {
-    String containerName =
-        requestProto.getCreateContainer().getContainerData().getName();
-    createContainerFutureMap
-        .computeIfAbsent(containerName, k -> new CompletableFuture<>());
-    return CompletableFuture.completedFuture(() -> ByteString.EMPTY);
-  }
-
-  @Override
-  public CompletableFuture<Message> writeStateMachineData(LogEntryProto entry) {
-    try {
-      final ContainerCommandRequestProto requestProto =
-          getRequestProto(entry.getSmLogEntry().getStateMachineData());
-      ContainerProtos.Type cmdType = requestProto.getCmdType();
-      switch (cmdType) {
-      case CreateContainer:
-        return handleCreateContainer(requestProto);
-      case WriteChunk:
-        return handleWriteChunk(requestProto, entry.getIndex());
-      default:
-        throw new IllegalStateException("Cmd Type:" + cmdType
-            + " should not have state machine data");
-      }
-    } catch (IOException e) {
-      return completeExceptionally(e);
-    }
-  }
-
-  @Override
-  public CompletableFuture<Message> query(Message request) {
-    try {
-      final ContainerCommandRequestProto requestProto =
-          getRequestProto(request.getContent());
-      return CompletableFuture.completedFuture(runCommand(requestProto));
-    } catch (IOException e) {
-      return completeExceptionally(e);
-    }
-  }
-
-  @Override
-  public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
-    try {
-      ContainerCommandRequestProto requestProto =
-          getRequestProto(trx.getSMLogEntry().getData());
-      ContainerProtos.Type cmdType = requestProto.getCmdType();
-
-      if (cmdType == ContainerProtos.Type.WriteChunk) {
-        WriteChunkRequestProto write = requestProto.getWriteChunk();
-        // the data field has already been removed in startTransaction
-        Preconditions.checkArgument(!write.hasData());
-        CompletableFuture<Message> stateMachineFuture =
-            writeChunkFutureMap.remove(trx.getLogEntry().getIndex());
-        return stateMachineFuture
-            .thenComposeAsync(v ->
-                CompletableFuture.completedFuture(runCommand(requestProto)));
-      } else {
-        Message message = runCommand(requestProto);
-        if (cmdType == ContainerProtos.Type.CreateContainer) {
-          String containerName =
-              requestProto.getCreateContainer().getContainerData().getName();
-          createContainerFutureMap.remove(containerName).complete(message);
-        }
-        return CompletableFuture.completedFuture(message);
-      }
-    } catch (IOException e) {
-      return completeExceptionally(e);
-    }
-  }
-
-  private static <T> CompletableFuture<T> completeExceptionally(Exception e) {
-    final CompletableFuture<T> future = new CompletableFuture<>();
-    future.completeExceptionally(e);
-    return future;
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-}
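
The create-container/write-chunk ordering described in the class javadoc is
enforced purely with CompletableFuture chaining: a pending create future,
when present, gates the write via thenApplyAsync. A self-contained
illustration of that gating idiom (plain strings stand in for the protobuf
messages; this is not code from the patch):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class GatingDemo {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        // "create container" must complete before any "write chunk" runs.
        CompletableFuture<String> create =
            CompletableFuture.supplyAsync(() -> "created", pool);
        CompletableFuture<String> write =
            create.thenApplyAsync(v -> v + " -> chunk written", pool);
        System.out.println(write.join()); // prints: created -> chunk written
        pool.shutdown();
      }
    }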

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
deleted file mode 100644
index d0ff094..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.transport.server
-    .XceiverServerSpi;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.netty.NettyConfigKeys;
-import org.apache.ratis.RatisHelper;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.server.RaftServer;
-import org.apache.ratis.server.RaftServerConfigKeys;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.SocketAddress;
-import java.util.Objects;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Creates a ratis server endpoint that acts as the communication layer for
- * Ozone containers.
- */
-public final class XceiverServerRatis implements XceiverServerSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverServerRatis.class);
-  private final int port;
-  private final RaftServer server;
-  private ThreadPoolExecutor writeChunkExecutor;
-
-  private XceiverServerRatis(DatanodeID id, int port, String storageDir,
-      ContainerDispatcher dispatcher, Configuration conf) throws IOException {
-
-    final String rpcType = conf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
-    final int raftSegmentSize = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT);
-    final int raftSegmentPreallocatedSize = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT);
-    final int maxChunkSize = OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE;
-    final int numWriteChunkThreads = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
-
-    Objects.requireNonNull(id, "id == null");
-    this.port = port;
-    RaftProperties serverProperties = newRaftProperties(rpc, port,
-        storageDir, maxChunkSize, raftSegmentSize, raftSegmentPreallocatedSize);
-
-    writeChunkExecutor =
-        new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads,
-            100, TimeUnit.SECONDS,
-            new ArrayBlockingQueue<>(1024),
-            new ThreadPoolExecutor.CallerRunsPolicy());
-    ContainerStateMachine stateMachine =
-        new ContainerStateMachine(dispatcher, writeChunkExecutor);
-    this.server = RaftServer.newBuilder()
-        .setServerId(RatisHelper.toRaftPeerId(id))
-        .setGroup(RatisHelper.emptyRaftGroup())
-        .setProperties(serverProperties)
-        .setStateMachine(stateMachine)
-        .build();
-  }
-
-  private static RaftProperties newRaftProperties(
-      RpcType rpc, int port, String storageDir, int scmChunkSize,
-      int raftSegmentSize, int raftSegmentPreallocatedSize) {
-    final RaftProperties properties = new RaftProperties();
-    RaftServerConfigKeys.Log.Appender.setBatchEnabled(properties, true);
-    RaftServerConfigKeys.Log.Appender.setBufferCapacity(properties,
-        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
-    RaftServerConfigKeys.Log.setWriteBufferSize(properties,
-        SizeInBytes.valueOf(scmChunkSize));
-    RaftServerConfigKeys.Log.setPreallocatedSize(properties,
-        SizeInBytes.valueOf(raftSegmentPreallocatedSize));
-    RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
-        SizeInBytes.valueOf(raftSegmentSize));
-    RaftServerConfigKeys.setStorageDir(properties, new File(storageDir));
-    RaftConfigKeys.Rpc.setType(properties, rpc);
-
-    RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2);
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(scmChunkSize + raftSegmentPreallocatedSize));
-    RaftServerConfigKeys.Rpc.setTimeoutMin(properties,
-        TimeDuration.valueOf(800, TimeUnit.MILLISECONDS));
-    RaftServerConfigKeys.Rpc.setTimeoutMax(properties,
-        TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS));
-    if (rpc == SupportedRpcType.GRPC) {
-      GrpcConfigKeys.Server.setPort(properties, port);
-    } else if (rpc == SupportedRpcType.NETTY) {
-      NettyConfigKeys.Server.setPort(properties, port);
-    }
-    return properties;
-  }
-
-  public static XceiverServerRatis newXceiverServerRatis(DatanodeID datanodeID,
-      Configuration ozoneConf, ContainerDispatcher dispatcher)
-      throws IOException {
-    final String ratisDir = File.separator + "ratis";
-    int localPort = ozoneConf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
-    String storageDir = ozoneConf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
-
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = ozoneConf.get(OzoneConfigKeys
-          .OZONE_METADATA_DIRS);
-      Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
-          "cannot be null. Please check your configuration.");
-      storageDir = storageDir.concat(ratisDir);
-      LOG.warn("Storage directory for Ratis is not configured. Mapping Ratis " +
-              "storage under {}. It is a good idea to map this to an SSD disk.",
-          storageDir);
-    }
-
-    // Get an available port on the current node and
-    // use that as the container port.
-    if (ozoneConf.getBoolean(OzoneConfigKeys
-            .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT)) {
-      try (ServerSocket socket = new ServerSocket()) {
-        socket.setReuseAddress(true);
-        SocketAddress address = new InetSocketAddress(0);
-        socket.bind(address);
-        localPort = socket.getLocalPort();
-        LOG.info("Found a free port for the server : {}", localPort);
-        // If random local ports are configured, we are probably running
-        // under MiniOzoneCluster. Ratis locks its storage directories, so
-        // each local instance needs its own directory; we therefore map
-        // the Ratis directories under the datanode ID.
-        storageDir =
-            storageDir.concat(File.separator + datanodeID.getDatanodeUuid());
-      } catch (IOException e) {
-        LOG.error("Unable find a random free port for the server, "
-            + "fallback to use default port {}", localPort, e);
-      }
-    }
-    datanodeID.setRatisPort(localPort);
-    return new XceiverServerRatis(datanodeID, localPort, storageDir,
-        dispatcher, ozoneConf);
-  }
-
-  @Override
-  public void start() throws IOException {
-    LOG.info("Starting {} {} at port {}", getClass().getSimpleName(),
-        server.getId(), getIPCPort());
-    writeChunkExecutor.prestartAllCoreThreads();
-    server.start();
-  }
-
-  @Override
-  public void stop() {
-    try {
-      writeChunkExecutor.shutdown();
-      server.close();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public int getIPCPort() {
-    return port;
-  }
-
-  /**
-   * Returns the Replication type supported by this end-point.
-   *
-   * @return enum -- {Stand_Alone, Ratis, Chained}
-   */
-  @Override
-  public OzoneProtos.ReplicationType getServerType() {
-    return OzoneProtos.ReplicationType.RATIS;
-  }
-}
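
Since XceiverServerRatis only exposes the static factory, its lifecycle from
a caller's point of view is small. A minimal sketch, assuming a DatanodeID, a
Configuration carrying the Ratis keys above, and a ContainerDispatcher are
already available:

    // Hypothetical caller; datanodeID, conf and dispatcher assumed available.
    XceiverServerSpi ratisServer =
        XceiverServerRatis.newXceiverServerRatis(datanodeID, conf, dispatcher);
    ratisServer.start();                  // starts the chunk executor and Ratis
    int port = ratisServer.getIPCPort();  // the (possibly random) Ratis port
    // ... serve container traffic over Ratis ...
    ratisServer.stop();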

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java
deleted file mode 100644
index 8debfe0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-/**
- * This package contains classes for the server implementation
- * using Apache Ratis
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
deleted file mode 100644
index 6ae45b6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.MapIterator;
-import org.apache.commons.collections.map.LRUMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * ContainerCache is an LRUMap that maintains the container DB handles.
- */
-public final class ContainerCache extends LRUMap {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerCache.class);
-  private final Lock lock = new ReentrantLock();
-  private static ContainerCache cache;
-  private static final float LOAD_FACTOR = 0.75f;
-  /**
-   * Constructs a cache that holds DBHandle references.
-   */
-  private ContainerCache(int maxSize, float loadFactor,
-      boolean scanUntilRemovable) {
-    super(maxSize, loadFactor, scanUntilRemovable);
-  }
-
-  /**
-   * Returns the singleton instance of {@link ContainerCache}
-   * that holds the DB handles.
-   *
-   * @param conf - Configuration.
-   * @return An instance of {@link ContainerCache}.
-   */
-  public static synchronized ContainerCache getInstance(Configuration conf) {
-    if (cache == null) {
-      int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE,
-          OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT);
-      cache = new ContainerCache(cacheSize, LOAD_FACTOR, true);
-    }
-    return cache;
-  }
-
-  /**
-   * Closes a db instance.
-   *
-   * @param container - name of the container whose DB is being closed.
-   * @param db - db instance to close.
-   */
-  private void closeDB(String container, MetadataStore db) {
-    if (db != null) {
-      try {
-        db.close();
-      } catch (IOException e) {
-        LOG.error("Error closing DB. Container: " + container, e);
-      }
-    }
-  }
-
-  /**
-   * Closes all the db instances and resets the cache.
-   */
-  public void shutdownCache() {
-    lock.lock();
-    try {
-      // iterate the cache and close each db
-      MapIterator iterator = cache.mapIterator();
-      while (iterator.hasNext()) {
-        iterator.next();
-        MetadataStore db = (MetadataStore) iterator.getValue();
-        closeDB(iterator.getKey().toString(), db);
-      }
-      // reset the cache
-      cache.clear();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected boolean removeLRU(LinkEntry entry) {
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore) entry.getValue();
-      closeDB(entry.getKey().toString(), db);
-    } finally {
-      lock.unlock();
-    }
-    return true;
-  }
-
-  /**
-   * Returns a DB handle if available; otherwise opens and caches a new one.
-   *
-   * @param containerName - Name of the container.
-   * @return MetadataStore handle for the container DB.
-   */
-  public MetadataStore getDB(String containerName, String containerDBPath)
-      throws IOException {
-    Preconditions.checkNotNull(containerName);
-    Preconditions.checkState(!containerName.isEmpty());
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore) this.get(containerName);
-
-      if (db == null) {
-        db = MetadataStoreBuilder.newBuilder()
-            .setDbFile(new File(containerDBPath))
-            .setCreateIfMissing(false)
-            .build();
-        this.put(containerName, db);
-      }
-      return db;
-    } catch (Exception e) {
-      LOG.error("Error opening DB. Container:{} ContainerPath:{}",
-          containerName, containerDBPath, e);
-      throw e;
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Closes and removes a DB handle from the cache.
-   *
-   * @param containerName - Name of the container.
-   */
-  public void removeDB(String containerName) {
-    Preconditions.checkNotNull(containerName);
-    Preconditions.checkState(!containerName.isEmpty());
-    lock.lock();
-    try {
-      MetadataStore db = (MetadataStore) this.get(containerName);
-      closeDB(containerName, db);
-      this.remove(containerName);
-    } finally {
-      lock.unlock();
-    }
-  }
-}
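
For anyone following the restructure, here is a minimal usage sketch of the cache that just moved. The container name and DB path are made-up values for illustration; note that getDB expects the DB files to already exist, since the builder sets createIfMissing to false:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
    import org.apache.hadoop.utils.MetadataStore;

    import java.io.IOException;

    public class ContainerCacheUsage {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Singleton; sized from OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE.
        ContainerCache cache = ContainerCache.getInstance(conf);

        // Hypothetical container name and DB path, for illustration only.
        MetadataStore db = cache.getDB("container1", "/tmp/container1/db");
        // ... read or write container metadata through the handle ...

        cache.removeDB("container1");  // closes and evicts a single handle
        cache.shutdownCache();         // closes every cached handle
      }
    }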

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
deleted file mode 100644
index 08264f0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.utils;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
deleted file mode 100644
index b143f22..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
-import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
-import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl;
-import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
-import org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-
-/**
- * Ozone main class that sets up the network servers and initializes the
- * container layer.
- */
-public class OzoneContainer {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneContainer.class);
-
-  private final Configuration ozoneConfig;
-  private final ContainerDispatcher dispatcher;
-  private final ContainerManager manager;
-  private final XceiverServerSpi[] server;
-  private final ChunkManager chunkManager;
-  private final KeyManager keyManager;
-  private final BlockDeletingService blockDeletingService;
-
-  /**
-   * Creates the network endpoints and enables the Ozone container.
-   *
-   * @param datanodeID - ID of this datanode.
-   * @param ozoneConfig - Ozone configuration.
-   */
-  public OzoneContainer(DatanodeID datanodeID, Configuration ozoneConfig) throws
-      IOException {
-    this.ozoneConfig = ozoneConfig;
-    List<StorageLocation> locations = new LinkedList<>();
-    String[] paths = ozoneConfig.getStrings(
-        OzoneConfigKeys.OZONE_METADATA_DIRS);
-    if (paths != null && paths.length > 0) {
-      for (String p : paths) {
-        locations.add(StorageLocation.parse(
-            Paths.get(p).resolve(CONTAINER_ROOT_PREFIX).toString()));
-      }
-    } else {
-      getDataDir(locations);
-    }
-
-    manager = new ContainerManagerImpl();
-    manager.init(this.ozoneConfig, locations, datanodeID);
-    this.chunkManager = new ChunkManagerImpl(manager);
-    manager.setChunkManager(this.chunkManager);
-
-    this.keyManager = new KeyManagerImpl(manager, ozoneConfig);
-    manager.setKeyManager(this.keyManager);
-
-    long svcInterval =
-        ozoneConfig.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-    long serviceTimeout = ozoneConfig.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    this.blockDeletingService = new BlockDeletingService(manager,
-        svcInterval, serviceTimeout, ozoneConfig);
-
-    this.dispatcher = new Dispatcher(manager, this.ozoneConfig);
-
-    server = new XceiverServerSpi[] {
-        new XceiverServer(this.ozoneConfig, this.dispatcher),
-        XceiverServerRatis
-            .newXceiverServerRatis(datanodeID, ozoneConfig, dispatcher)
-    };
-  }
-
-  /**
-   * Starts serving requests to ozone container.
-   *
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    for (XceiverServerSpi serverinstance : server) {
-      serverinstance.start();
-    }
-    blockDeletingService.start();
-    dispatcher.init();
-  }
-
-  /**
-   * Stops the Ozone container.
-   * <p>
-   * The shutdown logic is not obvious from the code below. If you need to
-   * modify the logic, please keep these comments in mind. Here is the
-   * shutdown sequence.
-   * <p>
-   * 1. We shut down the network ports.
-   * <p>
-   * 2. Now we need to wait for all in-flight requests to finish.
-   * <p>
-   * 3. The container manager lock is a read-write lock with "Fairness"
-   * enabled.
-   * <p>
-   * 4. This means that waiting threads are served in a "first-come,
-   * first-served" manner. Please note that this applies to waiting threads only.
-   * <p>
-   * 5. Since write locks are exclusive, if we are waiting to get a lock it
-   * implies that we are waiting for in-flight operations to complete.
-   * <p>
-   * 6. If there are other write operations waiting on the reader-writer lock,
-   * fairness guarantees that they will proceed before the shutdown lock
-   * request.
-   * <p>
-   * 7. Since every operation takes either a reader or a writer lock on the
-   * container manager, we are guaranteed to be the last operation, since
-   * we have closed the network ports and waited until close was successful.
-   * <p>
-   * 8. We take the writer lock and call shutdown on each of the managers in
-   * reverse order; that is, chunkManager, keyManager, and containerManager
-   * are shut down.
-   */
-  public void stop() {
-    LOG.info("Attempting to stop container services.");
-    for (XceiverServerSpi serverinstance : server) {
-      serverinstance.stop();
-    }
-    dispatcher.shutdown();
-
-    try {
-      this.manager.writeLock();
-      this.chunkManager.shutdown();
-      this.keyManager.shutdown();
-      this.manager.shutdown();
-      this.blockDeletingService.shutdown();
-      LOG.info("container services shutdown complete.");
-    } catch (IOException ex) {
-      LOG.warn("container service shutdown error:", ex);
-    } finally {
-      this.manager.writeUnlock();
-    }
-  }
-
-  /**
-   * Populates the given list with the configured data directories.
-   *
-   * @param pathList - List to which the data dir paths are added.
-   * @throws IOException
-   */
-  private void getDataDir(List<StorageLocation> pathList) throws IOException {
-    for (String dir : ozoneConfig.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
-      StorageLocation location = StorageLocation.parse(dir);
-      pathList.add(location);
-    }
-  }
-
-  /**
-   * Returns node report of container storage usage.
-   */
-  public SCMNodeReport getNodeReport() throws IOException {
-    return this.manager.getNodeReport();
-  }
-
-  private int getPortByType(OzoneProtos.ReplicationType replicationType) {
-    for (XceiverServerSpi serverinstance : server) {
-      if (serverinstance.getServerType() == replicationType) {
-        return serverinstance.getIPCPort();
-      }
-    }
-    return INVALID_PORT;
-  }
-
-  /**
-   * Returns the container server IPC port.
-   *
-   * @return Container server IPC port.
-   */
-  public int getContainerServerPort() {
-    return getPortByType(OzoneProtos.ReplicationType.STAND_ALONE);
-  }
-
-  /**
-   * Returns the Ratis container Server IPC port.
-   *
-   * @return Ratis port.
-   */
-  public int getRatisContainerServerPort() {
-    return getPortByType(OzoneProtos.ReplicationType.RATIS);
-  }
-
-  /**
-   * Returns container report.
-   * @return - container report.
-   * @throws IOException
-   */
-  public ContainerReportsRequestProto getContainerReport() throws IOException {
-    return this.manager.getContainerReport();
-  }
-
-  // TODO: remove getContainerReports
-  /**
-   * Returns the list of closed containers.
-   * @return - List of closed containers.
-   * @throws IOException
-   */
-  public List<ContainerData> getContainerReports() throws IOException {
-    return this.manager.getContainerReports();
-  }
-
-  @VisibleForTesting
-  public ContainerManager getContainerManager() {
-    return this.manager;
-  }
-
-  /**
-   * Get the container report state to send via HB to SCM.
-   * @return the container report state.
-   */
-  public ReportState getContainerReportState() {
-    return this.manager.getContainerReportState();
-  }
-}
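
Since OzoneContainer is the entry point into the container layer, a short lifecycle sketch may help reviewers of the move. The DatanodeID is assumed to come from the datanode's own registration path; everything else below is taken from the API of the class above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;

    import java.io.IOException;

    public class OzoneContainerLifecycle {
      // datanodeID is assumed to be supplied by the datanode's registration.
      static void runContainer(DatanodeID datanodeID, Configuration conf)
          throws IOException {
        // The constructor wires up the standalone and Ratis transports,
        // the container/chunk/key managers, and the block deleting service.
        OzoneContainer container = new OzoneContainer(datanodeID, conf);
        container.start();
        try {
          // Ports are resolved per replication type; INVALID_PORT if absent.
          int standalonePort = container.getContainerServerPort();
          int ratisPort = container.getRatisContainerServerPort();
          System.out.println("Serving on " + standalonePort + "/" + ratisPort);
        } finally {
          // stop() closes the network ports first, then takes the writer
          // lock and shuts the managers down in reverse order (see the
          // shutdown-sequence javadoc above).
          container.stop();
        }
      }
    }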

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
deleted file mode 100644
index c99c038..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-/**
- * Ozone main that calls into the container layer.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
deleted file mode 100644
index 6c75691..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * BucketManager handles all the bucket level operations.
- */
-public interface BucketManager {
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - KsmBucketInfo describing the bucket to create.
-   */
-  void createBucket(KsmBucketInfo bucketInfo) throws IOException;
-  /**
-   * Returns the bucket information.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   */
-  KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(KsmBucketArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   Required parameter volume name determines buckets in which volume
-   *   to return.
-   * @param startBucket
-   *   Optional start bucket name parameter indicating where to start
-   *   the bucket listing from; this key is excluded from the result.
-   * @param bucketPrefix
-   *   Optional prefix parameter, restricting the response to buckets
-   *   that begin with the specified name.
-   * @param maxNumOfBuckets
-   *   The maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-}
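
The listBuckets contract above (exclusive startBucket, capped page size) lends itself to cursor-style pagination. A hedged sketch, assuming a null startBucket lists from the beginning, a null bucketPrefix applies no prefix filter, and KsmBucketInfo exposes getBucketName():

    import org.apache.hadoop.ozone.ksm.BucketManager;
    import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;

    import java.io.IOException;
    import java.util.List;

    public class BucketPager {
      // Pages through all buckets of a volume in batches of batchSize.
      static void listAllBuckets(BucketManager buckets, String volume,
          int batchSize) throws IOException {
        String startBucket = null; // assumed: null starts from the beginning
        while (true) {
          List<KsmBucketInfo> page =
              buckets.listBuckets(volume, startBucket, null, batchSize);
          if (page.isEmpty()) {
            break;
          }
          for (KsmBucketInfo info : page) {
            System.out.println(info.getBucketName());
          }
          // startBucket is excluded from the next result, so the last entry
          // of this page is the correct cursor for the next call.
          startBucket = page.get(page.size() - 1).getBucketName();
          if (page.size() < batchSize) {
            break; // short page means there is nothing left to list
          }
        }
      }
    }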


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

