Return-Path: X-Original-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 3B67610F49 for ; Mon, 12 May 2014 10:50:31 +0000 (UTC) Received: (qmail 94551 invoked by uid 500); 10 May 2014 23:26:20 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 22950 invoked by uid 500); 10 May 2014 23:13:05 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 18124 invoked by uid 99); 10 May 2014 22:57:34 -0000 Received: from Unknown (HELO nike.apache.org) (192.87.106.230) by apache.org (qpsmtpd/0.29) with ESMTP; Sat, 10 May 2014 22:57:34 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 08 May 2014 22:25:11 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id 678AA23888E4; Thu, 8 May 2014 22:24:47 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1593438 - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Date: Thu, 08 May 2014 22:24:47 -0000 To: hdfs-commits@hadoop.apache.org From: arp@apache.org X-Mailer: svnmailer-1.0.9 Message-Id: <20140508222447.678AA23888E4@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: arp Date: Thu May 8 22:24:46 2014 New Revision: 1593438 URL: 
http://svn.apache.org/r1593438 Log: HDFS-6340: Merging r1593437 from branch-2 to branch-2.4. Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1593438&r1=1593437&r2=1593438&view=diff ============================================================================== --- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu May 8 22:24:46 2014 @@ -58,6 +58,8 @@ Release 2.4.1 - UNRELEASED HDFS-2882. DN continues to start up, even if block pool fails to initialize (vinayakumarb) + HDFS-6340. DN can't finalize upgrade. 
(Rahul Singhal via Arpit Agarwal) + Release 2.4.0 - 2014-04-07 INCOMPATIBLE CHANGES Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1593438&r1=1593437&r2=1593438&view=diff ============================================================================== --- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original) +++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu May 8 22:24:46 2014 @@ -1013,15 +1013,15 @@ class NameNodeRpcServer implements Namen + "from " + nodeReg + ", reports.length=" + reports.length); } final BlockManager bm = namesystem.getBlockManager(); - boolean hasStaleStorages = true; + boolean noStaleStorages = false; for(StorageBlockReport r : reports) { final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks()); - hasStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks); + noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks); } if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState() && - !hasStaleStorages) { + noStaleStorages) { return new FinalizeCommand(poolId); } Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1593438&r1=1593437&r2=1593438&view=diff ============================================================================== --- 
hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original) +++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Thu May 8 22:24:46 2014 @@ -30,6 +30,8 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; +import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.junit.After; import org.junit.Test; @@ -61,11 +63,9 @@ public class TestDFSFinalize { * Verify that the current directory exists and that the previous directory * does not exist. Verify that current hasn't been modified by comparing * the checksum of all its containing files with their original checksum. - * Note that we do not check that previous is removed on the DataNode - * because its removal is asynchronous therefore we have no reliable - * way to know when it will happen. 
*/ - static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception { + static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs, + String bpid) throws Exception { List dirs = Lists.newArrayList(); for (int i = 0; i < nameNodeDirs.length; i++) { File curDir = new File(nameNodeDirs[i], "current"); @@ -76,15 +76,30 @@ public class TestDFSFinalize { FSImageTestUtil.assertParallelFilesAreIdentical( dirs, Collections.emptySet()); + File dnCurDirs[] = new File[dataNodeDirs.length]; for (int i = 0; i < dataNodeDirs.length; i++) { - assertEquals( - UpgradeUtilities.checksumContents( - DATA_NODE, new File(dataNodeDirs[i],"current")), + dnCurDirs[i] = new File(dataNodeDirs[i],"current"); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]), UpgradeUtilities.checksumMasterDataNodeContents()); } for (int i = 0; i < nameNodeDirs.length; i++) { assertFalse(new File(nameNodeDirs[i],"previous").isDirectory()); } + + if (bpid == null) { + for (int i = 0; i < dataNodeDirs.length; i++) { + assertFalse(new File(dataNodeDirs[i],"previous").isDirectory()); + } + } else { + for (int i = 0; i < dataNodeDirs.length; i++) { + File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]); + assertFalse(new File(bpRoot,"previous").isDirectory()); + + File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir), + UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); + } + } } /** @@ -106,7 +121,7 @@ public class TestDFSFinalize { String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY); String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY); - log("Finalize with existing previous dir", numDirs); + log("Finalize NN & DN with existing previous dir", numDirs); UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); 
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); @@ -118,11 +133,47 @@ public class TestDFSFinalize { .startupOption(StartupOption.REGULAR) .build(); cluster.finalizeCluster(conf); - checkResult(nameNodeDirs, dataNodeDirs); + cluster.triggerBlockReports(); + // 1 second should be enough for asynchronous DN finalize + Thread.sleep(1000); + checkResult(nameNodeDirs, dataNodeDirs, null); + + log("Finalize NN & DN without existing previous dir", numDirs); + cluster.finalizeCluster(conf); + cluster.triggerBlockReports(); + // 1 second should be enough for asynchronous DN finalize + Thread.sleep(1000); + checkResult(nameNodeDirs, dataNodeDirs, null); + + cluster.shutdown(); + UpgradeUtilities.createEmptyDirs(nameNodeDirs); + UpgradeUtilities.createEmptyDirs(dataNodeDirs); + + log("Finalize NN & BP with existing previous dir", numDirs); + String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster); + UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current"); + UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous"); + UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current"); + UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", bpid); + UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", bpid); + cluster = new MiniDFSCluster.Builder(conf) + .format(false) + .manageDataDfsDirs(false) + .manageNameDfsDirs(false) + .startupOption(StartupOption.REGULAR) + .build(); + cluster.finalizeCluster(conf); + cluster.triggerBlockReports(); + // 1 second should be enough for asynchronous BP finalize + Thread.sleep(1000); + checkResult(nameNodeDirs, dataNodeDirs, bpid); - log("Finalize without existing previous dir", numDirs); + log("Finalize NN & BP without existing previous dir", numDirs); cluster.finalizeCluster(conf); - checkResult(nameNodeDirs, dataNodeDirs); + cluster.triggerBlockReports(); + // 1 second should be enough 
for asynchronous BP finalize + Thread.sleep(1000); + checkResult(nameNodeDirs, dataNodeDirs, bpid); cluster.shutdown(); UpgradeUtilities.createEmptyDirs(nameNodeDirs);