From: sjlee@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Fri, 14 Aug 2015 07:18:23 -0000
Subject: [37/43] hadoop git commit: HDFS-8245. Standby namenode doesn't
 process DELETED_BLOCK if the addblock request is in edit log. Contributed
 by Rushabh S Shah. (cherry picked from commit
 2d4ae3d18bc530fa9f81ee616db8af3395705fb9)

HDFS-8245. Standby namenode doesn't process DELETED_BLOCK if the addblock
request is in edit log. Contributed by Rushabh S Shah.
(cherry picked from commit 2d4ae3d18bc530fa9f81ee616db8af3395705fb9)
(cherry picked from commit f264a5aeede7e144af11f5357c7f901993de8e12)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/470019e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/470019e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/470019e9

Branch: refs/heads/sjlee/hdfs-merge
Commit: 470019e9b88e0fcede926442b91d102b595c7ace
Parents: a776ef5
Author: Kihwal Lee
Authored: Fri May 8 16:37:26 2015 -0500
Committer: Sangjin Lee
Committed: Thu Aug 13 18:21:24 2015 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java   | 24 ++++-
 .../server/datanode/TestBlockReplacement.java  | 97 ++++++++++++++++++++
 .../hdfs/server/namenode/ha/TestDNFencing.java |  4 -
 3 files changed, 118 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470019e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e271d55..bb54402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2287,8 +2287,15 @@ public class BlockManager {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Processing previously queued message " + rbi);
       }
-      processAndHandleReportedBlock(rbi.getStorageInfo(),
-          rbi.getBlock(), rbi.getReportedState(), null);
+      if (rbi.getReportedState() == null) {
+        // This is a DELETE_BLOCK request
+        DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
+        removeStoredBlock(rbi.getBlock(),
+            storageInfo.getDatanodeDescriptor());
+      } else {
+        processAndHandleReportedBlock(rbi.getStorageInfo(),
+            rbi.getBlock(), rbi.getReportedState(), null);
+      }
     }
   }
 
@@ -2984,6 +2991,17 @@ public class BlockManager {
     }
   }
 
+  private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
+      DatanodeDescriptor node) {
+    if (shouldPostponeBlocksFromFuture &&
+        namesystem.isGenStampInFuture(block)) {
+      queueReportedBlock(storageInfo, block, null,
+          QUEUE_REASON_FUTURE_GENSTAMP);
+      return;
+    }
+    removeStoredBlock(block, node);
+  }
+
   /**
    * Modify (block-->datanode) map. Possibly generate replication tasks, if the
    * removed block is still valid.
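
For readers following the change above: while the standby namenode is behind
on edits, it postpones reported blocks whose generation stamp it has not seen
yet and replays them once the edits are read. Before this patch a
DELETED_BLOCK notification was never postponed; it was applied immediately,
so if the matching add-block edit had not been read yet the removal found
nothing to remove, and the standby later picked up the by-then-stale replica
location from the edit log. The patch queues such deletions with a null
reported state and, as the first hunk shows, replays them through
removeStoredBlock() instead of the reported-block path. Below is a minimal,
self-contained sketch of that queue-and-replay dispatch; the class and method
names are simplified stand-ins for illustration, not the actual HDFS types.

import java.util.ArrayDeque;
import java.util.Queue;

/**
 * Simplified stand-in for the standby namenode's pending-message queue.
 * Messages for blocks "from the future" are queued and replayed once the
 * standby catches up on edits. A null state marks a deletion, mirroring
 * the null reported-state convention in the patch above.
 */
public class PendingBlockMessages {

  static final class QueuedMessage {
    final long blockId;
    final String state; // null means the replica was deleted on the DN

    QueuedMessage(long blockId, String state) {
      this.blockId = blockId;
      this.state = state;
    }
  }

  private final Queue<QueuedMessage> pending =
      new ArrayDeque<QueuedMessage>();

  /** Postpone a message until the edits catch up. */
  public void queue(long blockId, String state) {
    pending.add(new QueuedMessage(blockId, state));
  }

  /** Replay every queued message, dispatching deletions separately. */
  public void processQueued() {
    QueuedMessage msg;
    while ((msg = pending.poll()) != null) {
      if (msg.state == null) {
        // Deletion: drop the replica from the block map instead of
        // treating it as a newly reported block.
        removeStoredBlock(msg.blockId);
      } else {
        processReportedBlock(msg.blockId, msg.state);
      }
    }
  }

  private void removeStoredBlock(long blockId) {
    System.out.println("removed replica of block " + blockId);
  }

  private void processReportedBlock(long blockId, String state) {
    System.out.println("block " + blockId + " reported as " + state);
  }

  public static void main(String[] args) {
    PendingBlockMessages q = new PendingBlockMessages();
    q.queue(1001L, "FINALIZED"); // a normal incremental report
    q.queue(1002L, null);        // a deletion that must not be re-reported
    q.processQueued();
  }
}
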
@@ -3171,7 +3189,7 @@ public class BlockManager {
     for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
       switch (rdbi.getStatus()) {
       case DELETED_BLOCK:
-        removeStoredBlock(rdbi.getBlock(), node);
+        removeStoredBlock(storageInfo, rdbi.getBlock(), node);
         deleted++;
         break;
       case RECEIVED_BLOCK:


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470019e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index e0d7964..86b77d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -42,7 +42,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -51,8 +53,11 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -280,6 +285,98 @@ public class TestBlockReplacement {
   }
 
   /**
+   * Standby namenode doesn't queue the delete-block request when the
+   * add-block request is in edit logs that are yet to be read.
+   * @throws Exception
+   */
+  @Test
+  public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+       .nnTopology(MiniDFSNNTopology.simpleHATopology())
+       .numDataNodes(1).build();
+    DFSClient client = null;
+    try {
+      cluster.waitActive();
+      assertEquals("Number of namenodes is not 2", 2,
+          cluster.getNumNameNodes());
+      // Transition namenode 0 to active.
+      cluster.transitionToActive(0);
+      assertTrue("Namenode 0 should be in active state",
+          cluster.getNameNode(0).isActiveState());
+      assertTrue("Namenode 1 should be in standby state",
+          cluster.getNameNode(1).isStandbyState());
+
+      // Trigger a heartbeat to set DatanodeStorageInfo#heartbeatedSinceFailover
+      // to true.
+      DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
+      FileSystem fs = cluster.getFileSystem(0);
+
+      // Trigger a block report to set DatanodeStorageInfo#blockContentsStale
+      // to false.
+      cluster.getDataNodes().get(0).triggerBlockReport(
+          new BlockReportOptions.Factory().setIncremental(false).build());
+
+      Path fileName = new Path("/tmp.txt");
+      // Create a file with one block.
+      DFSTestUtil.createFile(fs, fileName, 10L, (short) 1, 1234L);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+
+      client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
+      List<LocatedBlock> locatedBlocks = client.getNamenode().
+          getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
+      assertTrue(locatedBlocks.size() == 1);
+      assertTrue(locatedBlocks.get(0).getLocations().length == 1);
+
+      // Add a second datanode to the cluster.
+      cluster.startDataNodes(conf, 1, true, null, null, null, null);
+      assertEquals("Number of datanodes should be 2", 2,
+          cluster.getDataNodes().size());
+
+      DataNode dn0 = cluster.getDataNodes().get(0);
+      DataNode dn1 = cluster.getDataNodes().get(1);
+      String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
+      DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(
+          cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
+      DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(
+          cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+
+      LOG.info("replaceBlock: " + replaceBlock(block,
+          (DatanodeInfo)sourceDnDesc, (DatanodeInfo)sourceDnDesc,
+          (DatanodeInfo)destDnDesc));
+      // Wait for the FsDatasetAsyncDiskService to delete the block.
+      Thread.sleep(3000);
+      // Trigger an incremental block report to report the deleted block to
+      // the namenode.
+      cluster.getDataNodes().get(0).triggerBlockReport(
+          new BlockReportOptions.Factory().setIncremental(true).build());
+
+      cluster.transitionToStandby(0);
+      cluster.transitionToActive(1);
+
+      assertTrue("Namenode 1 should be in active state",
+          cluster.getNameNode(1).isActiveState());
+      assertTrue("Namenode 0 should be in standby state",
+          cluster.getNameNode(0).isStandbyState());
+      client.close();
+
+      // Open a new client for the new active namenode.
+      client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
+      List<LocatedBlock> locatedBlocks1 = client.getNamenode()
+          .getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
+
+      assertEquals(1, locatedBlocks1.size());
+      assertEquals("The block should be only on 1 datanode", 1,
+          locatedBlocks1.get(0).getLocations().length);
+    } finally {
+      IOUtils.cleanup(null, client);
+      cluster.shutdown();
+    }
+  }
+
+  /**
    * @param args
    */
   public static void main(String[] args) throws Exception {


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470019e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index a538b6e..3748328 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -156,10 +156,6 @@ public class TestDNFencing {
     banner("NN2 Metadata immediately after failover");
     doMetasave(nn2);
 
-    // Even though NN2 considers the blocks over-replicated, it should
-    // post-pone the block invalidation because the DNs are still "stale".
-    assertEquals(30, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
-
     banner("Triggering heartbeats and block reports so that fencing is completed");
     cluster.triggerHeartbeats();
     cluster.triggerBlockReports();
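
A closing usage note: the test above drives block reports by hand instead of
waiting for the datanode's own report timers. The sketch below isolates that
call pattern; the wrapper class and method names (BlockReportTrigger,
triggerReports) are hypothetical and only for illustration, while the two
triggerBlockReport() calls themselves appear verbatim in the test diff.

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

class BlockReportTrigger {
  /** Force both flavors of block report from the first datanode. */
  static void triggerReports(MiniDFSCluster cluster) throws IOException {
    DataNode dn = cluster.getDataNodes().get(0);

    // Full report: refreshes the namenode's view of every replica on this
    // datanode (the test uses it to clear blockContentsStale).
    dn.triggerBlockReport(
        new BlockReportOptions.Factory().setIncremental(false).build());

    // Incremental report: delivers only the pending received/deleted
    // replica notifications (the test uses it to report the deleted block).
    dn.triggerBlockReport(
        new BlockReportOptions.Factory().setIncremental(true).build());
  }
}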