Subject: svn commit: r1024336 - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/tes...
Date: Tue, 19 Oct 2010 17:43:53 -0000
To: hdfs-commits@hadoop.apache.org
From: suresh@apache.org

Author: suresh
Date: Tue Oct 19 17:43:53 2010
New Revision: 1024336

URL: http://svn.apache.org/viewvc?rev=1024336&view=rev
Log:
HDFS-1449. Fix test failures - ExtendedBlock must return block file name in
#getBlockName(). Contributed by Suresh Srinivas.
Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Oct 19 17:43:53 2010
@@ -309,6 +309,9 @@ Trunk (unreleased changes)
 
     HDFS-1440. Fix TestComputeInvalidateWork failure. (suresh)
 
+    HDFS-1449. Fix test failures - ExtendedBlock must return
+    block file name in #getBlockName(). (suresh)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java Tue Oct 19 17:43:53 2010
@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -29,6 +31,8 @@ import org.apache.hadoop.io.WritableFact
 /**
  * Identifies a Block uniquely across the block pools
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class ExtendedBlock implements Writable {
   private String poolId;
   private Block block;
@@ -95,8 +99,9 @@ public class ExtendedBlock implements Wr
     return poolId;
   }
 
+  /** Returns the block file name for the block */
   public String getBlockName() {
-    return poolId + ":" + block;
+    return block.getBlockName();
   }
 
   public long getNumBytes() {
@@ -155,6 +160,6 @@ public class ExtendedBlock implements Wr
 
   @Override // Object
   public String toString() {
-    return getBlockName();
+    return poolId + ":" + block;
   }
 }
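The core of the change is the split above: getBlockName() now returns the local block's
file name while toString() keeps the pool-qualified form. As an illustration only (this
sketch is not part of the patch), caller code on the updated API would look roughly like
the following; fs, file, and cluster are assumed to be a FileSystem, a Path, and a
MiniDFSCluster configured as in the modified tests below:

    // Illustration only, not from this commit. Assumes fs/file/cluster are set up
    // as in the tests changed below.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
    String fileName = block.getBlockName();   // now the block file name (Block#getBlockName),
                                              // usable for locating a replica file on a datanode
    String poolAndBlock = block.toString();   // "poolId:block" - the form getBlockName() used to return
    cluster.corruptBlockOnDataNodes(block);   // test helpers below now take the ExtendedBlock itself

Passing the ExtendedBlock through, rather than a pre-formatted name string, lets each
helper pick whichever of the two string forms it needs.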
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Oct 19 17:43:53 2010
@@ -311,7 +311,8 @@ public class NamenodeFsck {
       if (isCorrupt) {
         corrupt++;
         res.corruptBlocks++;
-        out.print("\n" + path + ": CORRUPT block " + block.getBlockName()+"\n");
+        out.print("\n" + path + ": CORRUPT blockpool " + block.getPoolId() +
+            " block " + block.getBlockName()+"\n");
       }
       if (locs.length >= minReplication) res.numMinReplicatedBlocks++;

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Oct 19 17:43:53 2010
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -689,20 +690,23 @@ public class MiniDFSCluster {
   /*
    * Corrupt a block on all datanode
    */
-  void corruptBlockOnDataNodes(String blockName) throws Exception{
+  void corruptBlockOnDataNodes(ExtendedBlock block) throws Exception{
     for (int i=0; i < dataNodes.size(); i++)
-      corruptBlockOnDataNode(i,blockName);
+      corruptBlockOnDataNode(i, block);
   }
 
   /*
    * Corrupt a block on a particular datanode
    */
-  boolean corruptBlockOnDataNode(int i, String blockName) throws Exception {
+  boolean corruptBlockOnDataNode(int i, ExtendedBlock blk) throws Exception {
     Random random = new Random();
     boolean corrupted = false;
     File dataDir = new File(getBaseDirectory() + "data");
     if (i < 0 || i >= dataNodes.size())
       return false;
+
+    // TODO:FEDERATION use blockPoolId
+    String blockName = blk.getBlockName();
     for (int dn = i*2; dn < i*2+2; dn++) {
       File blockFile = new File(dataDir, "data" + (dn+1) + FINALIZED_DIR_NAME +
           blockName);
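To see why the getBlockName() change matters for this helper, the fragment below sketches
(with made-up values, following the directory layout used above) how corruptBlockOnDataNode
resolves a replica file; with the old pool-qualified "poolId:..." form the constructed path
could not match an on-disk block file, which appears to be the failure mode HDFS-1449 fixes:

    // Sketch with assumed values; mirrors the lookup in corruptBlockOnDataNode above.
    // blk.getBlockName() now returns just the block file name, e.g. "blk_1073741825".
    File dataDir = new File(getBaseDirectory() + "data");
    for (int dn = i*2; dn < i*2+2; dn++) {
      File blockFile = new File(dataDir, "data" + (dn+1) + FINALIZED_DIR_NAME + blk.getBlockName());
      if (blockFile.exists()) {
        // overwrite a few bytes of the replica to simulate corruption, as the helper does
      }
    }

Per the TODO:FEDERATION comment, the block pool id carried by the ExtendedBlock is not yet
part of the path here.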
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java Tue Oct 19 17:43:53 2010
@@ -31,6 +31,7 @@ import static org.junit.Assert.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -256,7 +257,7 @@ public class TestCrcCorruption {
     DFSTestUtil.createFile(fs, file, fileSize, (short)numDataNodes, 12345L /*seed*/);
     DFSTestUtil.waitReplication(fs, file, (short)numDataNodes);
 
-    String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
     cluster.corruptBlockOnDataNodes(block);
 
     try {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Tue Oct 19 17:43:53 2010
@@ -140,10 +140,12 @@ public class TestDatanodeBlockScanner ex
     cluster.shutdown();
   }
 
-  public static boolean corruptReplica(String blockName, int replica) throws IOException {
+  public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
+    String blockName = blk.getLocalBlock().getBlockName();
     Random random = new Random();
     File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     boolean corrupted = false;
+    // TODO:FEDERATION use BlockPoolId
     for (int i=replica*2; i