From: kihwal@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-12299. Race Between update pipeline and DN Re-Registration. Contributed by Brahma Reddy Battula.
Date: Fri, 25 Aug 2017 20:04:59 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b07eef43a -> b0e951725


HDFS-12299. Race Between update pipeline and DN Re-Registration. Contributed by Brahma Reddy Battula.
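In outline, the race being fixed (as read from the diff and the new test
below): a DataNode in a client's write pipeline re-registers with the
NameNode while the client is updating the pipeline. Re-registration can
invalidate the DataNode's storage state, so the expected-locations array
for the block under construction may contain null entries. Before this
patch, BlockUnderConstructionFeature#setExpectedLocations() sized its
replicas array to the non-null count but still indexed it with the raw
loop counter, so a leading null broke the pipeline update. The patch
writes through a separate cursor and, to make the scenario testable,
relaxes DataStreamer#updatePipeline() from private to package-private
with @VisibleForTesting.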
(cherry picked from commit 5a83ffa396089972e23c533eca33c9cba231c45a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e95172
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e95172
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e95172

Branch: refs/heads/branch-2.8
Commit: b0e951725f3a76da1d0db6b681540e1d5b8cfb01
Parents: b07eef4
Author: Kihwal Lee
Authored: Fri Aug 25 15:04:37 2017 -0500
Committer: Kihwal Lee
Committed: Fri Aug 25 15:04:37 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  3 +-
 .../BlockUnderConstructionFeature.java          |  2 +-
 .../TestClientProtocolForPipelineRecovery.java  | 50 ++++++++++++++++++++
 3 files changed, 53 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e95172/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 92f13d8..879c4ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1572,7 +1572,8 @@ class DataStreamer extends Daemon {
   }
 
   /** update pipeline at the namenode */
-  private void updatePipeline(long newGS) throws IOException {
+  @VisibleForTesting
+  void updatePipeline(long newGS) throws IOException {
     final ExtendedBlock oldBlock = block.getCurrentBlock();
     // the new GS has been propagated to all DN, it should be ok to update the
     // local block state
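A note on the next hunk, which is the substantive fix. Judging from the
surrounding code ("Only store non-null DatanodeStorageInfo"), the replicas
array in BlockUnderConstructionFeature#setExpectedLocations() is sized to
the number of non-null targets, so writing into it with the raw loop index
i overruns the array as soon as any earlier target is null, and a null
target is exactly what a DataNode re-registration can leave behind. Here
is a minimal, self-contained sketch of the pattern (hypothetical names;
String stands in for DatanodeStorageInfo):

  // Sketch only: hypothetical names, not the actual HDFS types.
  public class PackNonNullSketch {
    static String[] packNonNull(String[] targets) {
      // Size the output to the number of non-null entries, as
      // setExpectedLocations() does.
      int numLocations = 0;
      for (String t : targets) {
        if (t != null) {
          numLocations++;
        }
      }
      String[] replicas = new String[numLocations];
      int offset = 0;
      for (int i = 0; i < targets.length; i++) {
        if (targets[i] != null) {
          // Pre-patch this was effectively replicas[i] = targets[i];
          // with targets = {null, "s1", "s2"}, numLocations is 2 but i
          // reaches 2, so replicas[i] throws
          // ArrayIndexOutOfBoundsException. A separate write cursor
          // packs the surviving entries densely instead.
          replicas[offset++] = targets[i];
        }
      }
      return replicas;
    }

    public static void main(String[] args) {
      // A null first element models a storage dropped by re-registration.
      String[] packed = packNonNull(new String[] {null, "s1", "s2"});
      System.out.println(java.util.Arrays.toString(packed)); // [s1, s2]
    }
  }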
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e95172/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index c43ace7..7bdd5eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -84,7 +84,7 @@ public class BlockUnderConstructionFeature {
     for(int i = 0; i < targets.length; i++) {
       // Only store non-null DatanodeStorageInfo.
       if (targets[i] != null) {
-        replicas[i] = new ReplicaUnderConstruction(block,
+        replicas[offset++] = new ReplicaUnderConstruction(block,
             targets[i], ReplicaState.RBW);
       }
     }
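The new regression test below drives the scenario end to end: it disables
heartbeats on the first DataNode in the write pipeline and marks it dead
on the NameNode, re-registers that DataNode directly over
DatanodeProtocol, then bumps the generation stamp with
updateBlockForPipeline() and calls the streamer's updatePipeline() twice.
The second update is the case that used to fail; with the fix it should
complete without error.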
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e95172/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 50a9793..fe06936 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -705,4 +708,51 @@ public class TestClientProtocolForPipelineRecovery {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUpdatePipeLineAfterDNReg()throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+
+      Path file = new Path("/testUpdatePipeLineAfterDNReg");
+      FSDataOutputStream out = fileSys.create(file);
+      out.write(1);
+      out.hflush();
+      //Get the First DN and disable the heartbeats and then put in Deadstate
+      DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
+      DatanodeInfo[] pipeline = dfsOut.getPipeline();
+      DataNode dn1 = cluster.getDataNode(pipeline[0].getIpcPort());
+      dn1.setHeartbeatsDisabledForTests(true);
+      DatanodeDescriptor dn1Desc = cluster.getNamesystem(0).getBlockManager()
+          .getDatanodeManager().getDatanode(dn1.getDatanodeId());
+      cluster.setDataNodeDead(dn1Desc);
+      //Re-register the DeadNode
+      DatanodeProtocolClientSideTranslatorPB dnp =
+          new DatanodeProtocolClientSideTranslatorPB(
+              cluster.getNameNode().getNameNodeAddress(), conf);
+      dnp.registerDatanode(
+          dn1.getDNRegistrationForBP(cluster.getNamesystem().getBlockPoolId()));
+      DFSOutputStream dfsO = (DFSOutputStream) out.getWrappedStream();
+      String clientName = ((DistributedFileSystem) fileSys).getClient()
+          .getClientName();
+      NamenodeProtocols namenode = cluster.getNameNodeRpc();
+      //Update the genstamp and call updatepipeline
+      LocatedBlock newBlock = namenode
+          .updateBlockForPipeline(dfsO.getBlock(), clientName);
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+      newBlock = namenode.updateBlockForPipeline(dfsO.getBlock(), clientName);
+      //Should not throw any error Pipeline should be success
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }