From common-commits-return-93630-archive-asf-public=cust-asf.ponee.io@hadoop.apache.org Mon Mar 11 09:43:15 2019
Return-Path:
X-Original-To: archive-asf-public@cust-asf.ponee.io
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3])
    by mx-eu-01.ponee.io (Postfix) with SMTP id 401B6180763
    for ; Mon, 11 Mar 2019 10:43:14 +0100 (CET)
Received: (qmail 44267 invoked by uid 500); 11 Mar 2019 09:43:13 -0000
Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Delivered-To: mailing list common-commits@hadoop.apache.org
Received: (qmail 44258 invoked by uid 99); 11 Mar 2019 09:43:13 -0000
Received: from ec2-52-202-80-70.compute-1.amazonaws.com (HELO gitbox.apache.org) (52.202.80.70)
    by apache.org (qpsmtpd/0.29) with ESMTP; Mon, 11 Mar 2019 09:43:13 +0000
Received: by gitbox.apache.org (ASF Mail Server at gitbox.apache.org, from userid 33)
    id 9A431879B1; Mon, 11 Mar 2019 09:43:12 +0000 (UTC)
Date: Mon, 11 Mar 2019 09:43:04 +0000
To: "common-commits@hadoop.apache.org"
Subject: [hadoop] 01/02: HDDS-1210. Ratis pipeline creation doesn't check raft client reply status during initialization. Contributed by Mukul Kumar Singh.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
From: msingh@apache.org
In-Reply-To: <155229738231.2947.10471873436565292167@gitbox.apache.org>
References: <155229738231.2947.10471873436565292167@gitbox.apache.org>
X-Git-Host: gitbox.apache.org
X-Git-Repo: hadoop
X-Git-Refname: refs/heads/ozone-0.4
X-Git-Reftype: branch
X-Git-Rev: aa9de9a0fa110b2c9b1810e4287a04952b8fc932
X-Git-NotificationType: diff
X-Git-Multimail-Version: 1.5.dev
Auto-Submitted: auto-generated
Message-Id: <20190311094312.9A431879B1@gitbox.apache.org>

This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit aa9de9a0fa110b2c9b1810e4287a04952b8fc932
Author: Mukul Kumar Singh
AuthorDate: Wed Mar 6 21:38:21 2019 +0530

    HDDS-1210. Ratis pipeline creation doesn't check raft client reply status during initialization. Contributed by Mukul Kumar Singh.
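
In short: before this change, RatisPipelineUtils.createPipeline() issued raftClient.groupAdd() on each datanode and discarded the RaftClientReply, so a pipeline could be reported as created even when the Ratis GROUP_ADD had failed. The RatisPipelineUtils.java hunk below now inspects the reply and fails pipeline creation instead. A minimal sketch of that check, distilled from the hunk (variable names as in the patched method):

    // Check the Ratis reply instead of ignoring it; a null or unsuccessful
    // reply aborts pipeline initialization for this peer.
    RaftClientReply reply = raftClient.groupAdd(group, peer.getId());
    if (reply == null || !reply.isSuccess()) {
      String msg = "Pipeline initialization failed for pipeline:"
          + pipeline.getId() + " node:" + peer.getId();
      LOG.error(msg);
      throw new IOException(msg);
    }

The per-node IOExceptions are collected by callRatisRpc() and surface to the caller as a MultipleIOException, which is what the new integration test in TestRatisPipelineUtils asserts.
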
    (cherry picked from commit 2c3ec37738544107238f75d0ca781fd23bdc309b)
---
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java |  7 ++++
 .../hdds/scm/pipeline/RatisPipelineProvider.java  |  2 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java     | 15 ++++++--
 .../hdds/scm/pipeline/SCMPipelineManager.java     |  9 +++++
 .../TestHealthyPipelineChillModeRule.java         | 25 +++++++++++---
 .../TestOneReplicaPipelineChillModeRule.java      | 11 ++++--
 .../scm/chillmode/TestSCMChillModeManager.java    | 11 ++++--
 .../container/TestCloseContainerEventHandler.java | 10 ++++--
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 11 ++++++
 .../scm/pipeline/MockRatisPipelineProvider.java   | 40 ++++++++++++++++++++++
 .../scm/pipeline/TestRatisPipelineProvider.java   |  2 +-
 .../hdds/scm/pipeline/TestRatisPipelineUtils.java | 25 ++++++++++++--
 .../hdds/scm/pipeline/TestSCMPipelineManager.java | 28 +++++++++++++--
 13 files changed, 176 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index 6774b7f..9a846ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -45,6 +46,12 @@ public final class PipelineFactory {
         new RatisPipelineProvider(nodeManager, stateManager, conf));
   }
 
+  @VisibleForTesting
+  void setProvider(ReplicationType replicationType,
+      PipelineProvider provider) {
+    providers.put(replicationType, provider);
+  }
+
   public Pipeline create(ReplicationType type, ReplicationFactor factor)
       throws IOException {
     return providers.get(type).create(factor);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 6126d58..b73f63d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -143,7 +143,7 @@ public class RatisPipelineProvider implements PipelineProvider {
         .build();
   }
 
-  private void initializePipeline(Pipeline pipeline) throws IOException {
+  protected void initializePipeline(Pipeline pipeline) throws IOException {
     RatisPipelineUtils.createPipeline(pipeline, conf);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 201c034..89dfc0e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.RatisHelper;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.grpc.GrpcTlsConfig;
+import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftPeer;
@@ -71,7 +72,15 @@ public final class RatisPipelineUtils {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
     callRatisRpc(pipeline.getNodes(), ozoneConf,
-        (raftClient, peer) -> raftClient.groupAdd(group, peer.getId()));
+        (raftClient, peer) -> {
+          RaftClientReply reply = raftClient.groupAdd(group, peer.getId());
+          if (reply == null || !reply.isSuccess()) {
+            String msg = "Pipeline initialization failed for pipeline:"
+                + pipeline.getId() + " node:" + peer.getId();
+            LOG.error(msg);
+            throw new IOException(msg);
+          }
+        });
   }
 
   /**
@@ -186,8 +195,8 @@ public final class RatisPipelineUtils {
         rpc.accept(client, p);
       } catch (IOException ioe) {
         exceptions.add(
-            new IOException("Failed invoke Ratis rpc " + rpc + " for " + d,
-                ioe));
+            new IOException("Failed invoke Ratis rpc " + rpc + " for " +
+                d.getUuid(), ioe));
       }
     });
     if (!exceptions.isEmpty()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index 90facca..1bb0099 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -100,6 +100,15 @@ public class SCMPipelineManager implements PipelineManager {
     initializePipelineState();
   }
 
+  public PipelineStateManager getStateManager() {
+    return stateManager;
+  }
+
+  public void setPipelineProvider(ReplicationType replicationType,
+      PipelineProvider provider) {
+    pipelineFactory.setProvider(replicationType, provider);
+  }
+
   private void initializePipelineState() throws IOException {
     if (pipelineStore.isEmpty()) {
       LOG.info("No pipeline exists in current db");
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestHealthyPipelineChillModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestHealthyPipelineChillModeRule.java
index 61fbf19..3ee7a7c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestHealthyPipelineChillModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestHealthyPipelineChillModeRule.java
@@ -31,7 +31,8 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -69,8 +70,13 @@ public class TestHealthyPipelineChillModeRule {
         HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK,
         true);
 
-    PipelineManager pipelineManager = new SCMPipelineManager(config,
+    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
         nodeManager, eventQueue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), config);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
 
     SCMChillModeManager scmChillModeManager = new SCMChillModeManager(
         config, containers, pipelineManager, eventQueue);
@@ -109,9 +115,15 @@ public class TestHealthyPipelineChillModeRule {
         HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK,
         true);
 
-    PipelineManager pipelineManager = new SCMPipelineManager(config,
+    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
         nodeManager, eventQueue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), config);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
+
     // Create 3 pipelines
     Pipeline pipeline1 =
         pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
@@ -178,8 +190,13 @@ public class TestHealthyPipelineChillModeRule {
         HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK,
         true);
 
-    PipelineManager pipelineManager = new SCMPipelineManager(config,
+    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
         nodeManager, eventQueue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), config);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
 
     // Create 3 pipelines
     Pipeline pipeline1 =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestOneReplicaPipelineChillModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestOneReplicaPipelineChillModeRule.java
index f389a83..ac0d040 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestOneReplicaPipelineChillModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestOneReplicaPipelineChillModeRule.java
@@ -27,9 +27,10 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -51,7 +52,7 @@ public class TestOneReplicaPipelineChillModeRule {
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
   private OneReplicaPipelineChillModeRule rule;
-  private PipelineManager pipelineManager;
+  private SCMPipelineManager pipelineManager;
   private EventQueue eventQueue;
 
@@ -72,6 +73,12 @@ public class TestOneReplicaPipelineChillModeRule {
         new SCMPipelineManager(ozoneConfiguration, mockNodeManager,
             eventQueue);
 
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(mockNodeManager,
+            pipelineManager.getStateManager(), ozoneConfiguration);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
+
     createPipelines(pipelineFactorThreeCount,
         HddsProtos.ReplicationFactor.THREE);
     createPipelines(pipelineFactorOneCount,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestSCMChillModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestSCMChillModeManager.java
index 957fe70..55dca16 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestSCMChillModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/chillmode/TestSCMChillModeManager.java
@@ -35,9 +35,10 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -239,9 +240,15 @@ public class TestSCMChillModeManager {
     config.setBoolean(
         HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK, true);
-    PipelineManager pipelineManager = new SCMPipelineManager(config,
+    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
         nodeManager, queue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), config);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
+
     Pipeline pipeline = pipelineManager.createPipeline(
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index fec3f84..f7a5df7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -24,7 +24,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -49,7 +50,7 @@ public class TestCloseContainerEventHandler {
   private static Configuration configuration;
   private static MockNodeManager nodeManager;
-  private static PipelineManager pipelineManager;
+  private static SCMPipelineManager pipelineManager;
   private static SCMContainerManager containerManager;
   private static long size;
   private static File testDir;
@@ -67,6 +68,11 @@ public class TestCloseContainerEventHandler {
     nodeManager = new MockNodeManager(true, 10);
     pipelineManager =
        new SCMPipelineManager(configuration, nodeManager, eventQueue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), configuration);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
     containerManager = new
         SCMContainerManager(configuration, nodeManager,
             pipelineManager, new EventQueue());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 831ee96..6805210 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -44,6 +45,9 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .NodeReportFromDatanode;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -83,6 +87,13 @@ public class TestDeadNodeHandler {
     eventQueue = new EventQueue();
     scm = HddsTestUtils.getScm(conf);
     nodeManager = (SCMNodeManager) scm.getScmNodeManager();
+    SCMPipelineManager manager =
+        (SCMPipelineManager)scm.getPipelineManager();
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager, manager.getStateManager(),
+            conf);
+    manager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
     containerManager = scm.getContainerManager();
     deadNodeHandler = new DeadNodeHandler(nodeManager, containerManager);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
new file mode 100644
index 0000000..2282804
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+
+import java.io.IOException;
+
+/**
+ * Mock Ratis Pipeline Provider for Mock Nodes.
+ */
+public class MockRatisPipelineProvider extends RatisPipelineProvider {
+
+  public MockRatisPipelineProvider(NodeManager nodeManager,
+      PipelineStateManager stateManager,
+      Configuration conf) {
+    super(nodeManager, stateManager, conf);
+  }
+
+  protected void initializePipeline(Pipeline pipeline) throws IOException {
+    // do nothing as the datanodes do not exists
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 6f385de..28f47cc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -46,7 +46,7 @@ public class TestRatisPipelineProvider {
   public void init() throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     stateManager = new PipelineStateManager(new OzoneConfiguration());
-    provider = new RatisPipelineProvider(nodeManager,
+    provider = new MockRatisPipelineProvider(nodeManager,
         stateManager, new OzoneConfiguration());
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java
index e56b888..2180834 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineUtils.java
@@ -20,12 +20,16 @@ package org.apache.hadoop.hdds.scm.pipeline;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -81,12 +85,29 @@ public class TestRatisPipelineUtils {
     init(3);
     // make sure a pipelines is created
     waitForPipelines(1);
-    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
+    List<HddsDatanodeService> dns = new ArrayList<>(cluster.getHddsDatanodes());
+
+    List<Pipeline> pipelines =
+        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+    for (HddsDatanodeService dn : dns) {
       cluster.shutdownHddsDatanode(dn.getDatanodeDetails());
     }
+
+    // try creating another pipeline now
+    try {
+      RatisPipelineUtils.createPipeline(pipelines.get(0), conf);
+      Assert.fail("pipeline creation should fail after shutting down pipeline");
+    } catch (IOException ioe) {
+      // in case the pipeline creation fails, MultipleIOException is thrown
+      Assert.assertTrue(ioe instanceof MultipleIOException);
+    }
+
     // make sure pipelines is destroyed
     waitForPipelines(0);
-    cluster.startHddsDatanodes();
+    for (HddsDatanodeService dn : dns) {
+      cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
+    }
     // make sure pipelines is created after node start
     waitForPipelines(1);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index c871a56..4d8e3af 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -71,8 +71,13 @@ public class TestSCMPipelineManager {
   @Test
   public void testPipelineReload() throws IOException {
-    PipelineManager pipelineManager =
+    SCMPipelineManager pipelineManager =
         new SCMPipelineManager(conf, nodeManager, new EventQueue());
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), conf);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
     Set<Pipeline> pipelines = new HashSet<>();
     for (int i = 0; i < 5; i++) {
       Pipeline pipeline = pipelineManager
@@ -85,6 +90,11 @@ public class TestSCMPipelineManager {
     // new pipeline manager should be able to load the pipelines from the db
     pipelineManager =
         new SCMPipelineManager(conf, nodeManager, new EventQueue());
+    mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), conf);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
     for (Pipeline p : pipelines) {
       pipelineManager.openPipeline(p.getId());
     }
@@ -102,8 +112,14 @@ public class TestSCMPipelineManager {
   @Test
   public void testRemovePipeline() throws IOException {
-    PipelineManager pipelineManager =
+    SCMPipelineManager pipelineManager =
         new SCMPipelineManager(conf, nodeManager, new EventQueue());
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), conf);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
+
     Pipeline pipeline = pipelineManager
         .createPipeline(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE);
@@ -134,8 +150,14 @@ public class TestSCMPipelineManager {
   @Test
   public void testPipelineReport() throws IOException {
     EventQueue eventQueue = new EventQueue();
-    PipelineManager pipelineManager =
+    SCMPipelineManager pipelineManager =
         new SCMPipelineManager(conf, nodeManager, eventQueue);
+    PipelineProvider mockRatisProvider =
+        new MockRatisPipelineProvider(nodeManager,
+            pipelineManager.getStateManager(), conf);
+    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
+        mockRatisProvider);
+
     SCMChillModeManager scmChillModeManager =
         new SCMChillModeManager(new OzoneConfiguration(), new ArrayList<>(),
             pipelineManager, eventQueue);
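
For reference, the pattern the test changes above repeat is: build an SCMPipelineManager, then swap the RATIS provider for the new MockRatisPipelineProvider (which overrides initializePipeline() to skip the Ratis group creation) through the new setPipelineProvider() hook, so SCM-side tests can create pipelines without reaching real datanodes. A condensed sketch of that wiring, assuming the conf, nodeManager (MockNodeManager) and eventQueue fixtures the tests above already build:

    // Route RATIS pipeline creation through the mock provider so no
    // datanodes or Ratis servers are contacted during the test.
    SCMPipelineManager pipelineManager =
        new SCMPipelineManager(conf, nodeManager, eventQueue);
    PipelineProvider mockRatisProvider =
        new MockRatisPipelineProvider(nodeManager,
            pipelineManager.getStateManager(), conf);
    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        mockRatisProvider);
    // Pipeline creation now succeeds against mock nodes.
    Pipeline pipeline = pipelineManager.createPipeline(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
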
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org