From: aengineer@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-11451. Ozone: Add protobuf definitions for container reports. Contributed by Anu Engineer.
Date: Tue, 7 Mar 2017 18:31:59 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 a13c9782a -> e37c5ed47

HDFS-11451. Ozone: Add protobuf definitions for container reports. Contributed by Anu Engineer.
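For readers who just want the shape of the change: every datanode heartbeat now carries a ReportState telling SCM whether container reports are pending, and SCM may answer with a sendContainerReport command. Below is a minimal sketch of the new datanode-side call, in the spirit of the TestEndPoint changes further down; the endPoint proxy and datanodeID are assumed scaffolding, not part of the patch itself.

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMStorageReport;

    // A heartbeat that says "no container reports pending".
    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
    srb.setCapacity(2000).setScmUsed(500).setRemaining(1500);
    nrb.addStorageReport(srb);

    // Same default that StateContext builds when no report is queued.
    ReportState reportState = ReportState.newBuilder()
        .setState(ReportState.states.noContainerReports)
        .setCount(0)
        .build();

    // endPoint is a StorageContainerDatanodeProtocol proxy and datanodeID a
    // DatanodeID; both are assumed to already exist.
    SCMHeartbeatResponseProto response =
        endPoint.sendHeartbeat(datanodeID, nrb.build(), reportState);

If the datanode never sets a report state, StateContext.getContainerReportState() falls back to exactly this default, and the state machine clears any pending report state once it has been communicated.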
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e37c5ed4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e37c5ed4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e37c5ed4

Branch: refs/heads/HDFS-7240
Commit: e37c5ed47d553996ab88c521a3aab35fa27804bf
Parents: a13c978
Author: Anu Engineer
Authored: Tue Mar 7 10:28:21 2017 -0800
Committer: Anu Engineer
Committed: Tue Mar 7 10:28:21 2017 -0800

----------------------------------------------------------------------
 .../statemachine/SCMConnectionManager.java      |   4 +-
 .../common/statemachine/StateContext.java       |  40 +++++++-
 .../states/endpoint/HeartbeatEndpointTask.java  |  41 ++++++--
 .../StorageContainerDatanodeProtocol.java       |   4 +-
 .../protocol/commands/SendContainerCommand.java |  80 +++++++++++++++
 ...rDatanodeProtocolClientSideTranslatorPB.java |   5 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |   3 +-
 .../ozone/scm/StorageContainerManager.java      |   6 +-
 .../ozone/scm/container/ContainerMapping.java   |  23 +++--
 .../hadoop/ozone/scm/node/CommandQueue.java     |   8 +-
 .../StorageContainerDatanodeProtocol.proto      | 102 ++++++++++++++++---
 .../ozone/container/common/ScmTestMock.java     |  17 ++--
 .../common/TestDatanodeStateMachine.java        |  14 ++-
 .../ozone/container/common/TestEndPoint.java    |  52 ++++++----
 14 files changed, 331 insertions(+), 68 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java index 33d361b..a2384e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java @@ -73,7 +73,7 @@ public class SCMConnectionManager { * * @return - Return RPC timeout.
*/ - public long getRpcTimeout() { + public int getRpcTimeout() { return rpcTimeout; } @@ -128,7 +128,7 @@ public class SCMConnectionManager { StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProxy( StorageContainerDatanodeProtocolPB.class, version, address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), rpcTimeout); + NetUtils.getDefaultSocketFactory(conf), getRpcTimeout()); StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 4fa8729..15a241e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -19,10 +19,10 @@ package org.apache.hadoop.ozone.container.common.statemachine; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; import java.util.LinkedList; import java.util.Queue; @@ -34,6 +34,10 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import static org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState.states + .noContainerReports; + /** * Current Context of State Machine. */ @@ -45,6 +49,9 @@ public class StateContext { private final Configuration conf; private DatanodeStateMachine.DatanodeStates state; private SCMNodeReport nrState; + private ReportState reportState; + private static final ReportState DEFAULT_REPORT_STATE = + ReportState.newBuilder().setState(noContainerReports).setCount(0).build(); /** * Constructs a StateContext. @@ -174,6 +181,7 @@ public class StateContext { if (isExiting(newState)) { task.onExit(); } + this.clearReportState(); this.setState(newState); } } @@ -215,4 +223,32 @@ public class StateContext { } + /** + * Gets the ReportState. + * @return ReportState. + */ + public synchronized ReportState getContainerReportState() { + if (reportState == null) { + return DEFAULT_REPORT_STATE; + } + return reportState; + } + + /** + * Sets the ReportState. + * @param rState - ReportState. + */ + public synchronized void setContainerReportState(ReportState rState) { + this.reportState = rState; + } + + /** + * Clears report state after it has been communicated. 
+ */ + public synchronized void clearReportState() { + if (reportState != null) { + setContainerReportState(null); + } + } + } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index c0226bb..5aba687 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -26,6 +26,13 @@ import org.apache.hadoop.ozone.container.common.statemachine import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto; +import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand; +import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; +import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,12 +96,13 @@ public class HeartbeatEndpointTask DatanodeID datanodeID = DatanodeID.getFromProtoBuf(this .containerNodeIDProto.getDatanodeID()); - rpcEndpoint.getEndPoint().sendHeartbeat(datanodeID, - this.context.getNodeReport()); + SCMHeartbeatResponseProto response = rpcEndpoint.getEndPoint() + .sendHeartbeat(datanodeID, this.context.getNodeReport(), + this.context.getContainerReportState()); + processResponse(response); rpcEndpoint.zeroMissedCount(); } catch (IOException ex) { - rpcEndpoint.logIfNeeded(ex - ); + rpcEndpoint.logIfNeeded(ex); } finally { rpcEndpoint.unlock(); } @@ -110,6 +118,27 @@ public class HeartbeatEndpointTask } /** + * Adds commands from the SCM heartbeat response to the command processing queue. + * + * @param response - SCMHeartbeat response. + */ + private void processResponse(SCMHeartbeatResponseProto response) { + for (SCMCommandResponseProto commandResponseProto : response + .getCommandsList()) { + if (commandResponseProto.getCmdType() == + StorageContainerDatanodeProtocolProtos.Type.nullCmd) { + //this.context.addCommand(NullCommand.newBuilder().build()); + LOG.debug("Discarding a null command from SCM."); + } + if (commandResponseProto.getCmdType() == + StorageContainerDatanodeProtocolProtos.Type.sendContainerReport) { + this.context.addCommand(SendContainerCommand.getFromProtobuf( + commandResponseProto.getSendReport())); + } + } + } + + /** * Builder class for HeartbeatEndpointTask. */ public static class Builder { @@ -138,8 +167,8 @@ HeartbeatEndpointTask /** * Sets the Config.
* - * @param config - config - * @return Builder + * @param config - config + * @return Builder */ public Builder setConfig(Configuration config) { this.conf = config; http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index e93baef..76f359c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; @@ -43,11 +44,12 @@ public interface StorageContainerDatanodeProtocol { * Used by data node to send a Heartbeat. * @param datanodeID - Datanode ID. * @param nodeReport - node report state + * @param reportState - container report state. * @return - SCMHeartbeatResponseProto * @throws IOException */ SCMHeartbeatResponseProto sendHeartbeat(DatanodeID datanodeID, - SCMNodeReport nodeReport) throws IOException; + SCMNodeReport nodeReport, ReportState reportState) throws IOException; /** * Register Datanode. http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java new file mode 100644 index 0000000..6a9fc44 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.protocol.commands; + +import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.SendContainerReportProto; +import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.Type; + +/** + * Allows a Datanode to send in the container report. + */ +public class SendContainerCommand extends SCMCommand { + /** + * Returns a NullCommand class from NullCommandResponse Proto. + * @param unused - unused + * @return NullCommand + */ + public static SendContainerCommand getFromProtobuf( + final SendContainerReportProto unused) { + return new SendContainerCommand(); + } + + /** + * returns a new builder. + * @return Builder + */ + public static SendContainerCommand.Builder newBuilder() { + return new SendContainerCommand.Builder(); + } + + /** + * Returns the type of this command. + * + * @return Type + */ + @Override + public Type getType() { + return Type.sendContainerReport; + } + + /** + * Gets the protobuf message of this object. + * + * @return A protobuf message. + */ + @Override + public byte[] getProtoBufMessage() { + return SendContainerReportProto.newBuilder().build().toByteArray(); + } + + /** + * A Builder class this is the standard pattern we are using for all commands. + */ + public static class Builder { + /** + * Return a null command. + * @return - NullCommand. + */ + public SendContainerCommand build() { + return new SendContainerCommand(); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index 477fac5..78b46cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -24,6 +24,8 @@ import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; @@ -121,11 +123,12 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB @Override public SCMHeartbeatResponseProto sendHeartbeat(DatanodeID datanodeID, - SCMNodeReport nodeReport) throws IOException { + SCMNodeReport nodeReport, ReportState reportState) throws IOException { SCMHeartbeatRequestProto.Builder req = SCMHeartbeatRequestProto 
.newBuilder(); req.setDatanodeID(datanodeID.getProtoBufMessage()); req.setNodeReport(nodeReport); + req.setContainerReportState(reportState); final SCMHeartbeatResponseProto resp; try { resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, req.build()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java index 995290f..ca13305 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java @@ -78,7 +78,8 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB SCMHeartbeatRequestProto request) throws ServiceException { try { return impl.sendHeartbeat(DatanodeID.getFromProtoBuf(request - .getDatanodeID()), request.getNodeReport()); + .getDatanodeID()), request.getNodeReport(), + request.getContainerReportState()); } catch (IOException e) { throw new ServiceException(e); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java index 3f31e98..5820e6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java @@ -37,6 +37,8 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.NullCmdResponseProto; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; @@ -391,12 +393,14 @@ public class StorageContainerManager * Used by data node to send a Heartbeat. * * @param datanodeID - Datanode ID. + * @param nodeReport - Node Report + * @param reportState - Container report ready info. 
* @return - SCMHeartbeatResponseProto * @throws IOException */ @Override public SCMHeartbeatResponseProto sendHeartbeat(DatanodeID datanodeID, - SCMNodeReport nodeReport) throws IOException { + SCMNodeReport nodeReport, ReportState reportState) throws IOException { List commands = getScmNodeManager().sendHeartbeat(datanodeID, nodeReport); List cmdResponses = new LinkedList<>(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java index 346cb88..6bcdb4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java @@ -49,7 +49,7 @@ public class ContainerMapping implements Mapping { private final long cacheSize; private final Lock lock; private final Charset encoding = Charset.forName("UTF-8"); - private final LevelDBStore store; + private final LevelDBStore containerStore; private final Random rand; /** @@ -75,11 +75,14 @@ public class ContainerMapping implements Mapping { throw new IllegalArgumentException("SCM metadata directory is not valid."); } - File dbPath = new File(scmMetaDataDir, "SCM.db"); Options options = new Options(); options.cacheSize(this.cacheSize * (1024L * 1024L)); options.createIfMissing(); - store = new LevelDBStore(dbPath, options); + + // Write the container name to pipeline mapping. + File containerDBPath = new File(scmMetaDataDir, "container.db"); + containerStore = new LevelDBStore(containerDBPath, options); + this.lock = new ReentrantLock(); rand = new Random(); } @@ -103,6 +106,8 @@ public class ContainerMapping implements Mapping { return pipeline; } + + /** * Returns the Pipeline from the container name. * @@ -114,7 +119,8 @@ Pipeline pipeline = null; lock.lock(); try { - byte[] pipelineBytes = store.get(containerName.getBytes(encoding)); + byte[] pipelineBytes = + containerStore.get(containerName.getBytes(encoding)); if (pipelineBytes == null) { throw new IOException("Specified key does not exist. key : " + containerName); @@ -145,7 +151,8 @@ lock.lock(); try { - byte[] pipelineBytes = store.get(containerName.getBytes(encoding)); + byte[] pipelineBytes = + containerStore.get(containerName.getBytes(encoding)); if (pipelineBytes != null) { throw new IOException("Specified container already exists.
key : " + containerName); @@ -153,7 +160,7 @@ DatanodeID id = getDatanodeID(); if (id != null) { pipeline = newPipelineFromNodes(id, containerName); - store.put(containerName.getBytes(encoding), + containerStore.put(containerName.getBytes(encoding), pipeline.getProtobufMessage().toByteArray()); } } finally { @@ -193,8 +200,8 @@ */ @Override public void close() throws IOException { - if (store != null) { - store.close(); + if (containerStore != null) { + containerStore.close(); } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java index fce86f7..9a49fc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java @@ -39,7 +39,8 @@ public class CommandQueue { private final Map> commandMap; private final Lock lock; - private final List nullList; + // This list is used as the default return value, containing one null command. + private static final List DEFAULT_LIST = new LinkedList<>(); /** * Constructs a Command Queue. */ public CommandQueue() { commandMap = new HashMap<>(); lock = new ReentrantLock(); - nullList = new LinkedList<>(); - nullList.add(NullCommand.newBuilder().build()); + DEFAULT_LIST.add(NullCommand.newBuilder().build()); } /** @@ -75,7 +75,7 @@ } finally { lock.unlock(); } - return nullList; + return DEFAULT_LIST; } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto index ee1378f..38be4be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -47,15 +47,80 @@ import "DatanodeContainerProtocol.proto"; */ message SCMHeartbeatRequestProto { required DatanodeIDProto datanodeID = 1; - optional SCMNodeReport nodeReport= 2; + optional SCMNodeReport nodeReport = 2; + optional ReportState containerReportState = 3; } +enum ContainerState { + closed = 0; + open = 1; +} + +/** +ReportState contains messages from the datanode to SCM saying that it has +some information that SCM might be interested in.*/ +message ReportState { + enum states { + noContainerReports = 0; + completeContinerReport = 1; + deltaContainerReport = 2; + } + required states state = 1; + required int64 count = 2 [default = 0]; } + + +/** +This message is used to persist the information about a container in the +SCM database. This information allows SCM to start up faster and avoid having +all container info in memory all the time.
*/ +message ContainerPersistanceProto { + required ContainerState state = 1; + required hadoop.hdfs.ozone.Pipeline pipeline = 2; + required ContainerInfo info = 3; +} + +/** +This message is used to do a quick lookup of which containers are affected +if a node goes down. +*/ +message NodeContianerMapping { + repeated string contianerName = 1; +} + + + +/** +A container report contains the following information. +*/ +message ContainerInfo { + required string containerName = 1; + repeated bytes finalhash = 2; + optional int64 size = 3; + optional int64 keycount = 4; +} + +/** +A set of container reports; the max count is generally set to +8192, since that keeps the size of the reports under 1 MB. +*/ +message ContainerReports { + enum reportType { + fullReport = 0; + deltaReport = 1; + } + repeated ContainerInfo reports = 1; + required reportType type = 2; +} + + /** * This message is sent along with the heartbeat to report datanode * storage utilization to SCM. */ message SCMNodeReport { - repeated SCMStorageReport storageReport= 1; + repeated SCMStorageReport storageReport = 1; } message SCMStorageReport { @@ -124,12 +189,19 @@ message NullCmdResponseProto { } /** +This command tells the data node to send in the container report when possible. +*/ +message SendContainerReportProto { +} + +/** Type of commands supported by SCM to datanode protocol. */ enum Type { nullCmd = 1; versionCommand = 2; registeredCommand = 3; + sendContainerReport = 4; } /* @@ -138,6 +210,10 @@ enum Type { message SCMCommandResponseProto { required Type cmdType = 2; // Type of the command optional NullCmdResponseProto nullCommand = 3; + optional SCMRegisteredCmdResponseProto registeredProto = 4; + optional SCMVersionResponseProto versionProto = 5; + optional SendContainerReportProto sendReport = 6; + } @@ -157,9 +233,9 @@ message SCMHeartbeatResponseProto { * Here is a simple state diagram that shows how a datanode would boot up and * communicate with SCM.
* - * +-----------------------+ + * ----------------------- * | Start | - * +----------+------------+ + * ---------- ------------ * | * | * | @@ -167,19 +243,19 @@ message SCMHeartbeatResponseProto { * | * | * | - * +----------v-------------+ - * | Searching for SCM +------------+ - * +----------+-------------+ | + * ----------v------------- + * | Searching for SCM ------------ + * ---------- ------------- | * | | * | | - * | +----------v-------------+ + * | ----------v------------- * | | Register if needed | - * | +-----------+------------+ + * | ----------- ------------ * | | * v | - * +-----------+----------------+ | - * +---------+ Heartbeat state <--------+ - * | +--------^-------------------+ + * ----------- ---------------- | + * --------- Heartbeat state <-------- + * | --------^------------------- * | | * | | * | | @@ -187,7 +263,7 @@ message SCMHeartbeatResponseProto { * | | * | | * | | - * +------------------+ + * ------------------ * * * http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index b20202e..a9787ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -20,10 +20,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; import org.apache.hadoop.ozone.protocol.VersionResponse; import org.apache.hadoop.ozone.protocol.commands.NullCommand; -import org.apache.hadoop.ozone.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.scm.VersionInfo; import java.io.IOException; @@ -37,6 +36,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { private int rpcResponseDelay; private AtomicInteger heartbeatCount = new AtomicInteger(0); private AtomicInteger rpcCount = new AtomicInteger(0); + private ReportState reportState; /** * Returns the number of heartbeats made to this class. 
@@ -112,10 +112,11 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { */ @Override public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto - sendHeartbeat(DatanodeID datanodeID, SCMNodeReport nodeReport) - throws IOException { + sendHeartbeat(DatanodeID datanodeID, SCMNodeReport nodeReport, + ReportState reportState) throws IOException { rpcCount.incrementAndGet(); heartbeatCount.incrementAndGet(); + this.reportState = reportState; sleepIfNeeded(); StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto cmdResponse = StorageContainerDatanodeProtocolProtos @@ -153,4 +154,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol { StorageContainerDatanodeProtocolProtos .SCMRegisteredCmdResponseProto.ErrorCode.success).build(); } + + public ReportState getReportState() { + return this.reportState; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index d9e4cd7..b0e3616 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManage import org.apache.hadoop.ozone.container.common.states.DatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.junit.After; @@ -48,6 +49,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; /** * Tests the datanode state machine class and its states. @@ -65,6 +68,7 @@ public class TestDatanodeStateMachine { @Before public void setUp() throws Exception { conf = SCMTestUtils.getConf(); + conf.setInt(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500); serverAddresses = new LinkedList<>(); scmServers = new LinkedList<>(); mockServers = new LinkedList<>(); @@ -194,9 +198,9 @@ public class TestDatanodeStateMachine { // This execute will invoke getVersion calls against all SCM endpoints // that we know of. - task.execute(executorService); - newState = task.await(2, TimeUnit.SECONDS); + task.execute(executorService); + newState = task.await(10, TimeUnit.SECONDS); // If we are in running state, we should be in running. 
Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, newState); @@ -246,8 +250,14 @@ public class TestDatanodeStateMachine { Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, newState); + for (ScmTestMock mock : mockServers) { Assert.assertEquals(1, mock.getHeartbeatCount()); + // Assert that heartbeat did indeed carry that State that we said + // have in the datanode. + Assert.assertEquals(mock.getReportState().getState().getNumber(), + StorageContainerDatanodeProtocolProtos.ReportState.states + .noContainerReports.getNumber()); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37c5ed4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 9404393..58f74f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -20,7 +20,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine .EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -31,16 +32,18 @@ import org.apache.hadoop.ozone.container.common.states.endpoint import org.apache.hadoop.ozone.container.common.states.endpoint .VersionEndpointTask; import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; import org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMStorageReport; +import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import org.apache.hadoop.ozone.protocol.proto .StorageContainerDatanodeProtocolProtos.Type; @@ -58,6 +61,9 @@ import java.net.InetSocketAddress; import java.util.UUID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.ozone.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState.states + .noContainerReports; /** * Tests the endpoints. 
@@ -67,6 +73,27 @@ public class TestEndPoint { private static RPC.Server scmServer; private static ScmTestMock scmServerImpl; private static File testDir; + private static StorageContainerDatanodeProtocolProtos.ReportState + defaultReportState; + + @AfterClass + public static void tearDown() throws Exception { + if (scmServer != null) { + scmServer.stop(); + } + FileUtil.fullyDelete(testDir); + } + + @BeforeClass + public static void setUp() throws Exception { + serverAddress = SCMTestUtils.getReuseableAddress(); + scmServerImpl = new ScmTestMock(); + scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(), + scmServerImpl, serverAddress, 10); + testDir = PathUtils.getTestDir(TestEndPoint.class); + defaultReportState = StorageContainerDatanodeProtocolProtos.ReportState. + newBuilder().setState(noContainerReports).build(); + } @Test /** @@ -255,7 +282,7 @@ srb.setCapacity(2000).setScmUsed(500).setRemaining(1500).build(); nrb.addStorageReport(srb); SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() - .sendHeartbeat(dataNode, nrb.build()); + .sendHeartbeat(dataNode, nrb.build(), defaultReportState); Assert.assertNotNull(responseProto); Assert.assertEquals(1, responseProto.getCommandsCount()); Assert.assertNotNull(responseProto.getCommandsList().get(0)); @@ -322,21 +349,4 @@ scmServerImpl.setRpcResponseDelay(0); Assert.assertThat(end - start, new LessOrEqual<>(rpcTimeout + tolerance)); } - - @AfterClass - public static void tearDown() throws Exception { - if (scmServer != null) { - scmServer.stop(); - } - FileUtil.fullyDelete(testDir); - } - - @BeforeClass - public static void setUp() throws Exception { - serverAddress = SCMTestUtils.getReuseableAddress(); - scmServerImpl = new ScmTestMock(); - scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(), - scmServerImpl, serverAddress, 10); - testDir = PathUtils.getTestDir(TestEndPoint.class); - } }
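Taken together, the changes form a small command loop: SCM queues a request for a container report, the heartbeat response carries it down, and the datanode converts it into a SendContainerCommand on its StateContext. A hedged sketch of that round trip, using only types introduced by this patch; in the real code SCM assembles the response inside sendHeartbeat, and the context variable here stands for the datanode's StateContext, assumed in scope:

    import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
    import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;

    // 1. SCM side: queue a sendContainerReport command in the heartbeat reply.
    SCMHeartbeatResponseProto reply = SCMHeartbeatResponseProto.newBuilder()
        .addCommands(SCMCommandResponseProto.newBuilder()
            .setCmdType(Type.sendContainerReport)
            .setSendReport(SendContainerReportProto.newBuilder().build())
            .build())
        .build();

    // 2. Datanode side: HeartbeatEndpointTask.processResponse() walks the
    //    command list and queues a SendContainerCommand for the state machine.
    for (SCMCommandResponseProto cmd : reply.getCommandsList()) {
      if (cmd.getCmdType() == Type.sendContainerReport) {
        context.addCommand(SendContainerCommand.getFromProtobuf(
            cmd.getSendReport()));
      }
    }

The nullCmd branch in processResponse stays for backward compatibility: it simply logs the command at debug level and drops it.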