From: kihwal@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1441656 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/se...
Date: Fri, 01 Feb 2013 22:58:11 -0000
Message-Id: <20130201225811.ACAD423888E7@eris.apache.org>

Author: kihwal
Date: Fri Feb  1 22:58:11 2013
New Revision: 1441656

URL: http://svn.apache.org/viewvc?rev=1441656&view=rev
Log:
merge -r 1311379:1311380 Merging from trunk to branch-0.23 to fix HDFS-3119

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb  1 22:58:11 2013
@@ -30,6 +30,10 @@ Release 0.23.7 - UNRELEASED
     HDFS-4444. Add space between total transaction time and number of
     transactions in FSEditLog#printStatistics. (Stephen Chu via tgraves)
 
+    HDFS-3119. Overreplicated block is not deleted even after the replication
+    factor is reduced after sync followed by closing that file. (Ashish Singhi
+    via umamahesh)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES
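For context, the sequence that triggers HDFS-3119 is: sync a partially
written block, lower the file's replication factor while the stream is
still open, then close. Before this change the excess replica was never
invalidated. A minimal client-side sketch, reconstructed from the new test
added at the end of this commit (the path and file contents are
illustrative, and a running cluster is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class Hdfs3119Repro {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/hdfs3119");

        // Create the file with replication 2 and sync a partial block,
        // persisting it on two datanodes.
        FSDataOutputStream out = fs.create(p, (short) 2);
        out.writeBytes("partial block contents");
        out.hsync();

        // Reduce the replication factor while the file is still open.
        fs.setReplication(p, (short) 1);

        // Before the fix, closing the file never scheduled the extra
        // replica for invalidation; the block stayed over-replicated.
        out.close();
      }
    }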
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Feb  1 22:58:11 2013
@@ -2468,7 +2468,7 @@ public class BlockManager {
     }
   }
 
-  public void checkReplication(Block block, int numExpectedReplicas) {
+  public void checkReplication(Block block, short numExpectedReplicas) {
     // filter out containingNodes that are marked for decommission.
     NumberReplicas number = countNodes(block);
     if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
@@ -2476,6 +2476,10 @@ public class BlockManager {
                            number.liveReplicas(),
                            number.decommissionedReplicas(),
                            numExpectedReplicas);
+      return;
+    }
+    if (number.liveReplicas() > numExpectedReplicas) {
+      processOverReplicatedBlock(block, numExpectedReplicas, null, null);
     }
   }
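Taken together, the two hunks make checkReplication handle both directions:
under-replication still goes to neededReplications, and the new early return
plus the trailing check routes any excess live replicas to
processOverReplicatedBlock. Reconstructed from the hunks above (a sketch of
the patched method; the surrounding class members are omitted), the result
reads approximately:

      public void checkReplication(Block block, short numExpectedReplicas) {
        // filter out containingNodes that are marked for decommission.
        NumberReplicas number = countNodes(block);
        if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
          neededReplications.add(block,
                                 number.liveReplicas(),
                                 number.decommissionedReplicas(),
                                 numExpectedReplicas);
          // Under-replicated: queued for re-replication; nothing more to do.
          return;
        }
        // New in this change: schedule excess live replicas for invalidation.
        if (number.liveReplicas() > numExpectedReplicas) {
          processOverReplicatedBlock(block, numExpectedReplicas, null, null);
        }
      }

The parameter type also changes from int to short to match
INodeFile.getReplication(), which the caller in FSNamesystem now passes
through directly (see the next hunk).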
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Feb  1 22:58:11 2013
@@ -1727,10 +1727,12 @@ public class FSNamesystem implements Nam
 
   /**
    * Check all blocks of a file. If any blocks are lower than their intended
-   * replication factor, then insert them into neededReplication
+   * replication factor, then insert them into neededReplication; if any
+   * blocks exceed their intended replication factor, insert them into
+   * invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    int numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1441656&r1=1441655&r2=1441656&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Fri Feb  1 22:58:11 2013
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,13 +37,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestOverReplicatedBlocks extends TestCase {
+public class TestOverReplicatedBlocks {
   /** Test processOverReplicatedBlock can handle corrupt replicas fine.
    * It make sure that it won't treat corrupt replicas as valid ones
    * thus prevents NN deleting valid replicas but keeping
    * corrupt ones. */
+  @Test
   public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
@@ -113,4 +116,30 @@ public class TestOverReplicatedBlocks ex
       cluster.shutdown();
     }
   }
+  /**
+   * Test that an over-replicated block is invalidated after the replication
+   * factor is decreased on a partially written (synced but open) file.
+   */
+  @Test
+  public void testInvalidateOverReplicatedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
+      FSDataOutputStream out = fs.create(p, (short) 2);
+      out.writeBytes("HDFS-3119: " + p);
+      out.hsync();
+      fs.setReplication(p, (short) 1);
+      out.close();
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
+      assertEquals("Expected only one live replica for the block", 1, bm
+          .countNodes(block.getLocalBlock()).liveReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
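A note on how the new test exercises the fix: hsync() persists a partial
block on both datanodes while the stream is still open, so setReplication()
lowers the expected count to 1 before close(). Closing the file runs
checkReplicationFactor() over the file's blocks, which now routes the
over-replicated partial block to processOverReplicatedBlock(); once the
excess replica is invalidated, countNodes() reports a single live replica,
which is exactly what the assertion checks. (The body of the
checkReplicationFactor loop is cut off in the hunk above; presumably it
passes each block to the BlockManager's checkReplication, which is the path
the earlier hunks change.)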