From: jing9@apache.org
To: hdfs-commits@hadoop.apache.org
Subject: svn commit: r1557907 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/
Date: Tue, 14 Jan 2014 02:23:43 -0000
Message-Id: <20140114022343.937642388860@eris.apache.org>
X-Mailer: svnmailer-1.0.9

Author: jing9
Date: Tue Jan 14 02:23:42 2014
New Revision: 1557907

URL: http://svn.apache.org/r1557907
Log:
HDFS-5579. Merge change r1557904 from trunk.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1557907&r1=1557906&r2=1557907&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jan 14 02:23:42 2014
@@ -265,6 +265,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5710. FSDirectory#getFullPathName should check inodes against null.
     (Uma Maheswara Rao G via jing9)
 
+    HDFS-5579. Under construction files make DataNode decommission take very long
+    hours. (zhaoyunjiong via jing9)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report
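The substance of HDFS-5579, before the per-file diffs: when a DataNode is drained for decommission, the NameNode previously set aside every block belonging to a file still open for write, so a long-lived open file could keep the node in the decommissioning state for hours. After this change only the file's last block, the one actively being written, is exempted, and the decommission progress check tolerates that block only once it has more than minReplication live replicas. The sketch below restates that rule in isolation; it is illustrative only, not Hadoop source, and every name in it is a placeholder for the MutableBlockCollection / getLastBlock() / minReplication machinery visible in the hunks that follow.

// Illustrative sketch only (not Hadoop source): the decision rule this
// commit introduces, with placeholder arguments instead of the real
// BlockCollection/BlockInfo types.
public class DecommissionRuleSketch {

  static boolean canSkipDuringDecommission(boolean fileUnderConstruction,
      boolean isLastBlockOfFile, int liveReplicas, int minReplication) {
    // Only the still-growing tail block of an open file may be ignored,
    // and only once it already exceeds the minimum replica count.
    return fileUnderConstruction
        && isLastBlockOfFile
        && liveReplicas > minReplication;
  }

  public static void main(String[] args) {
    // Tail block of an open file, 2 live replicas, minReplication = 1:
    // skippable, so decommission can make progress.  Prints "true".
    System.out.println(canSkipDuringDecommission(true, true, 2, 1));
    // A finalized middle block of the same open file must still be
    // re-replicated before the node can finish.  Prints "false".
    System.out.println(canSkipDuringDecommission(true, false, 2, 1));
  }
}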
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java?rev=1557907&r1=1557906&r2=1557907&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java Tue Jan 14 02:23:42 2014
@@ -31,7 +31,7 @@ public interface BlockCollection {
   /**
    * Get the last block of the collection.
    */
-  public BlockInfo getLastBlock() throws IOException;
+  public BlockInfo getLastBlock();
 
   /**
    * Get content summary.

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1557907&r1=1557906&r2=1557907&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Jan 14 02:23:42 2014
@@ -1214,8 +1214,10 @@ public class BlockManager {
           // block should belong to a file
           bc = blocksMap.getBlockCollection(block);
           // abandoned block or block reopened for append
-          if(bc == null || bc instanceof MutableBlockCollection) {
-            neededReplications.remove(block, priority); // remove from neededReplications
+          if(bc == null
+              || (bc instanceof MutableBlockCollection && block.equals(bc.getLastBlock()))) {
+            // remove from neededReplications
+            neededReplications.remove(block, priority);
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
@@ -1297,7 +1299,7 @@ public class BlockManager {
           // block should belong to a file
           bc = blocksMap.getBlockCollection(block);
           // abandoned block or block reopened for append
-          if(bc == null || bc instanceof MutableBlockCollection) {
+          if(bc == null || (bc instanceof MutableBlockCollection && block.equals(bc.getLastBlock()))) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
             neededReplications.decrementReplicationIndex(priority);
@@ -2911,8 +2913,16 @@ assert storedBlock.findDatanode(dn) < 0
         NumberReplicas num = countNodes(block);
         int curReplicas = num.liveReplicas();
         int curExpectedReplicas = getReplication(block);
-        if (curExpectedReplicas > curReplicas) {
+
+        if (isNeededReplication(block, curExpectedReplicas, curReplicas)) {
+          if (bc instanceof MutableBlockCollection) {
+            if (block.equals(bc.getLastBlock()) && curReplicas > minReplication) {
+              continue;
+            }
+            underReplicatedInOpenFiles++;
+          }
+
           // Log info about one block for this node which needs replication
           if (!status) {
             status = true;
@@ -2929,9 +2939,6 @@ assert storedBlock.findDatanode(dn) < 0
           if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
             decommissionOnlyReplicas++;
           }
-          if (bc instanceof MutableBlockCollection) {
-            underReplicatedInOpenFiles++;
-          }
         }
         if (!neededReplications.contains(block) &&
             pendingReplications.getNumReplicas(block) == 0) {
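Taken together, the first two hunks change which blocks are dropped from neededReplications when replication work is scheduled: previously any block of an open file was dropped, now only the open file's tail block is, so the already-finalized earlier blocks of a long-open file get re-replicated like any closed file's blocks. The toy model below restates that filter as runnable code; it is a sketch under invented names (FileModel, blocksNeedingReplication), not the BlockManager API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Toy model (invented names, not Hadoop source) of the neededReplications
// filter after this change: of an open file's blocks, only the tail block
// is exempt from re-replication.
public class LastBlockFilterSketch {

  static final class FileModel {
    final List<Long> blockIds;   // the file's block IDs, in order
    final boolean openForWrite;  // "under construction" in HDFS terms
    FileModel(List<Long> blockIds, boolean openForWrite) {
      this.blockIds = blockIds;
      this.openForWrite = openForWrite;
    }
    Long lastBlock() {
      return blockIds.isEmpty() ? null : blockIds.get(blockIds.size() - 1);
    }
  }

  // Blocks of this file that still need replication work scheduled.
  static List<Long> blocksNeedingReplication(FileModel file) {
    List<Long> pending = new ArrayList<Long>();
    for (Long id : file.blockIds) {
      if (file.openForWrite && id.equals(file.lastBlock())) {
        continue; // tail block of an open file: skip, as the hunks above do
      }
      pending.add(id);
    }
    return pending;
  }

  public static void main(String[] args) {
    FileModel open = new FileModel(Arrays.asList(1L, 2L, 3L), true);
    // Prints [1, 2]: only tail block 3 is skipped, so decommission no
    // longer waits on every block of the open file.
    System.out.println(blocksNeedingReplication(open));
  }
}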
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1557907&r1=1557906&r2=1557907&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Tue Jan 14 02:23:42 2014
@@ -467,7 +467,7 @@ public class INodeFile extends INodeWith
   }
 
   @Override
-  public BlockInfo getLastBlock() throws IOException {
+  public BlockInfo getLastBlock() {
     return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
   }
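A note on why the signature change above is safe: INodeFile#getLastBlock never actually throws, so the throws IOException clause was dead weight, and removing it from both BlockCollection and INodeFile lets the new BlockManager conditions call bc.getLastBlock() inline inside a boolean expression with no try/catch (call sites that previously caught the exception need a matching cleanup, since catching an exception that is never thrown does not compile). A minimal illustration of the pattern, with invented names, not Hadoop code:

// Invented example (not Hadoop code): after dropping a checked exception
// that the implementation never threw, callers can use the result inline,
// as the new BlockManager checks do.
public class NarrowedThrowsSketch {

  interface BlockHolder {
    String getLast(); // formerly "String getLast() throws IOException;"
  }

  static class FileLike implements BlockHolder {
    private final String[] blocks = { "b1", "b2", "b3" };
    @Override
    public String getLast() {
      return blocks.length == 0 ? null : blocks[blocks.length - 1];
    }
  }

  public static void main(String[] args) {
    BlockHolder bc = new FileLike();
    // Usable directly in a condition; no checked exception to handle.
    if ("b3".equals(bc.getLast())) {
      System.out.println("tail block identified without try/catch");
    }
  }
}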
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1557907&r1=1557906&r2=1557907&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Tue Jan 14 02:23:42 2014
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.HostFileManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -780,4 +781,53 @@ public class TestDecommission {
       Thread.sleep(HEARTBEAT_INTERVAL * 1000);
     }
   }
+
+  @Test(timeout=120000)
+  public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
+    LOG.info("Starting test testDecommissionWithOpenfile");
+
+    //At most 4 nodes will be decommissioned
+    startCluster(1, 7, conf);
+
+    FileSystem fileSys = cluster.getFileSystem(0);
+    FSNamesystem ns = cluster.getNamesystem(0);
+
+    String openFile = "/testDecommissionWithOpenfile.dat";
+
+    writeFile(fileSys, new Path(openFile), (short)3);
+    // make sure the file was open for write
+    FSDataOutputStream fdos = fileSys.append(new Path(openFile));
+
+    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(0), openFile, 0, fileSize);
+
+    DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
+    DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
+
+    ArrayList<String> nodes = new ArrayList<String>();
+    ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
+
+    for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
+      DatanodeInfo found = datanodeInfo;
+      for (DatanodeInfo dif: dnInfos4LastBlock) {
+        if (datanodeInfo.equals(dif)) {
+          found = null;
+        }
+      }
+      if (found != null) {
+        nodes.add(found.getXferAddr());
+        dnInfos.add(found);
+      }
+    }
+    //decommission one of the 3 nodes which have last block
+    nodes.add(dnInfos4LastBlock[0].getXferAddr());
+    dnInfos.add(dnInfos4LastBlock[0]);
+
+    writeConfigFile(excludeFile, nodes);
+    refreshNodes(ns, conf);
+    for (DatanodeInfo dn : dnInfos) {
+      waitNodeState(dn, AdminStates.DECOMMISSIONED);
+    }
+
+    fdos.close();
+  }
 }
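How the new test exercises the fix: startCluster(1, 7, conf) gives 7 DataNodes with replication factor 3, the file is reopened with append() so its last block stays under construction, and the exclude list then names every holder of the first block that is not also in the last block's write pipeline, plus one member of that pipeline, at most 4 nodes in total (hence the comment in the test). The first-block holders can finish decommissioning only because finalized blocks of the open file are now re-replicated, and the pipeline member only because the tail block is exempted; without the fix the nodes would sit in the decommissioning state until the 120-second timeout fails the test. The standalone restatement below shows just the node-selection step (illustrative names, not the test itself):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Standalone restatement (illustrative) of the node selection in
// testDecommissionWithOpenfile above.
public class NodeSelectionSketch {

  // Nodes to decommission: holders of the first block that are not in the
  // last block's pipeline, plus one holder of the last block.
  static List<String> pickNodes(String[] firstBlockHolders,
                                String[] lastBlockHolders) {
    List<String> picked = new ArrayList<String>();
    List<String> pipeline = Arrays.asList(lastBlockHolders);
    for (String node : firstBlockHolders) {
      if (!pipeline.contains(node)) {
        picked.add(node);
      }
    }
    picked.add(lastBlockHolders[0]);
    return picked;
  }

  public static void main(String[] args) {
    String[] first = { "dn1", "dn2", "dn3" };
    String[] last  = { "dn3", "dn4", "dn5" };
    // Prints [dn1, dn2, dn3]: dn1/dn2 hold only the first block, and dn3
    // is the sampled member of the open file's write pipeline.
    System.out.println(pickNodes(first, last));
  }
}

Note that the test closes fdos only after every selected node reaches DECOMMISSIONED, so the file's tail block really is under construction for the whole decommission window.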