From: suresh@apache.org
To: hdfs-commits@hadoop.apache.org
Date: Fri, 29 Apr 2011 18:16:38 -0000
Subject: svn commit: r1097905 [13/14] - in /hadoop/hdfs/trunk: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/j...

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Apr 29 18:16:32 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
@@ -34,15 +35,10 @@ import org.apache.hadoop.util.DataChecks
 /**
  * this class tests the methods of the SimulatedFSDataset.
- * */ - public class TestSimulatedFSDataset extends TestCase { - Configuration conf = null; - - - + static final String bpid = "BP-TEST"; static final int NUMBLOCKS = 20; static final int BLOCK_LENGTH_MULTIPLIER = 79; @@ -50,7 +46,6 @@ public class TestSimulatedFSDataset exte super.setUp(); conf = new HdfsConfiguration(); conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); - } protected void tearDown() throws Exception { @@ -61,10 +56,13 @@ public class TestSimulatedFSDataset exte return blkid*BLOCK_LENGTH_MULTIPLIER; } - int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException { + int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) + throws IOException { int bytesAdded = 0; for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) { - Block b = new Block(i, 0, 0); // we pass expected len as zero, - fsdataset should use the sizeof actual data written + ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); + // we pass expected len as zero, - fsdataset should use the sizeof actual + // data written ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b); BlockWriteStreams out = bInfo.createStreams(true, 512, 4); try { @@ -89,8 +87,8 @@ public class TestSimulatedFSDataset exte } public void testGetMetaData() throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); - Block b = new Block(1, 5, 0); + FSDatasetInterface fsdataset = getSimulatedFSDataset(); + ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0); try { assertFalse(fsdataset.metaFileExists(b)); assertTrue("Expected an IO exception", false); @@ -98,7 +96,7 @@ public class TestSimulatedFSDataset exte // ok - as expected } addSomeBlocks(fsdataset); // Only need to add one but .... - b = new Block(1, 0, 0); + b = new ExtendedBlock(bpid, 1, 0, 0); InputStream metaInput = fsdataset.getMetaDataInputStream(b); DataInputStream metaDataInput = new DataInputStream(metaInput); short version = metaDataInput.readShort(); @@ -110,19 +108,18 @@ public class TestSimulatedFSDataset exte public void testStorageUsage() throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); + FSDatasetInterface fsdataset = getSimulatedFSDataset(); assertEquals(fsdataset.getDfsUsed(), 0); assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity()); int bytesAdded = addSomeBlocks(fsdataset); assertEquals(bytesAdded, fsdataset.getDfsUsed()); assertEquals(fsdataset.getCapacity()-bytesAdded, fsdataset.getRemaining()); - } - void checkBlockDataAndSize(FSDatasetInterface fsdataset, - Block b, long expectedLen) throws IOException { + void checkBlockDataAndSize(FSDatasetInterface fsdataset, ExtendedBlock b, + long expectedLen) throws IOException { InputStream input = fsdataset.getBlockInputStream(b); long lengthRead = 0; int data; @@ -134,36 +131,35 @@ public class TestSimulatedFSDataset exte } public void testWriteRead() throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); + FSDatasetInterface fsdataset = getSimulatedFSDataset(); addSomeBlocks(fsdataset); for (int i=1; i <= NUMBLOCKS; ++i) { - Block b = new Block(i, 0, 0); + ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); assertTrue(fsdataset.isValidBlock(b)); assertEquals(blockIdToLen(i), fsdataset.getLength(b)); checkBlockDataAndSize(fsdataset, b, blockIdToLen(i)); } } - - public void testGetBlockReport() throws IOException { - SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); - BlockListAsLongs blockReport = fsdataset.getBlockReport(); + 
SimulatedFSDataset fsdataset = getSimulatedFSDataset(); + BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid); assertEquals(0, blockReport.getNumberOfBlocks()); - int bytesAdded = addSomeBlocks(fsdataset); - blockReport = fsdataset.getBlockReport(); + addSomeBlocks(fsdataset); + blockReport = fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); for (Block b: blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes()); } } + public void testInjectionEmpty() throws IOException { - SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); - BlockListAsLongs blockReport = fsdataset.getBlockReport(); + SimulatedFSDataset fsdataset = getSimulatedFSDataset(); + BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid); assertEquals(0, blockReport.getNumberOfBlocks()); int bytesAdded = addSomeBlocks(fsdataset); - blockReport = fsdataset.getBlockReport(); + blockReport = fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); for (Block b: blockReport) { assertNotNull(b); @@ -172,28 +168,26 @@ public class TestSimulatedFSDataset exte // Inject blocks into an empty fsdataset // - injecting the blocks we got above. - - - SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf); - sfsdataset.injectBlocks(blockReport); - blockReport = sfsdataset.getBlockReport(); + SimulatedFSDataset sfsdataset = getSimulatedFSDataset(); + sfsdataset.injectBlocks(bpid, blockReport); + blockReport = sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); for (Block b: blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes()); - assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b)); + assertEquals(blockIdToLen(b.getBlockId()), sfsdataset + .getLength(new ExtendedBlock(bpid, b))); } assertEquals(bytesAdded, sfsdataset.getDfsUsed()); assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining()); } public void testInjectionNonEmpty() throws IOException { - SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); - - BlockListAsLongs blockReport = fsdataset.getBlockReport(); + SimulatedFSDataset fsdataset = getSimulatedFSDataset(); + BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid); assertEquals(0, blockReport.getNumberOfBlocks()); int bytesAdded = addSomeBlocks(fsdataset); - blockReport = fsdataset.getBlockReport(); + blockReport = fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); for (Block b: blockReport) { assertNotNull(b); @@ -203,44 +197,41 @@ public class TestSimulatedFSDataset exte // Inject blocks into an non-empty fsdataset // - injecting the blocks we got above. - - - SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf); + SimulatedFSDataset sfsdataset = getSimulatedFSDataset(); // Add come blocks whose block ids do not conflict with // the ones we are going to inject. 
bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1); - BlockListAsLongs blockReport2 = sfsdataset.getBlockReport(); + sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); - blockReport2 = sfsdataset.getBlockReport(); + sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks()); - sfsdataset.injectBlocks(blockReport); - blockReport = sfsdataset.getBlockReport(); + sfsdataset.injectBlocks(bpid, blockReport); + blockReport = sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks()); for (Block b: blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes()); - assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b)); + assertEquals(blockIdToLen(b.getBlockId()), sfsdataset + .getLength(new ExtendedBlock(bpid, b))); } assertEquals(bytesAdded, sfsdataset.getDfsUsed()); assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining()); - // Now test that the dataset cannot be created if it does not have sufficient cap - conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10); try { - sfsdataset = new SimulatedFSDataset(conf); - sfsdataset.injectBlocks(blockReport); + sfsdataset = getSimulatedFSDataset(); + sfsdataset.addBlockPool(bpid, conf); + sfsdataset.injectBlocks(bpid, blockReport); assertTrue("Expected an IO exception", false); } catch (IOException e) { // ok - as expected } - } - public void checkInvalidBlock(Block b) throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); + public void checkInvalidBlock(ExtendedBlock b) throws IOException { + FSDatasetInterface fsdataset = getSimulatedFSDataset(); assertFalse(fsdataset.isValidBlock(b)); try { fsdataset.getLength(b); @@ -262,41 +253,42 @@ public class TestSimulatedFSDataset exte } catch (IOException e) { // ok - as expected } - } public void testInValidBlocks() throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); - Block b = new Block(1, 5, 0); + FSDatasetInterface fsdataset = getSimulatedFSDataset(); + ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0); checkInvalidBlock(b); // Now check invlaid after adding some blocks addSomeBlocks(fsdataset); - b = new Block(NUMBLOCKS + 99, 5, 0); + b = new ExtendedBlock(bpid, NUMBLOCKS + 99, 5, 0); checkInvalidBlock(b); - } public void testInvalidate() throws IOException { - FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); + FSDatasetInterface fsdataset = getSimulatedFSDataset(); int bytesAdded = addSomeBlocks(fsdataset); Block[] deleteBlocks = new Block[2]; deleteBlocks[0] = new Block(1, 0, 0); deleteBlocks[1] = new Block(2, 0, 0); - fsdataset.invalidate(deleteBlocks); - checkInvalidBlock(deleteBlocks[0]); - checkInvalidBlock(deleteBlocks[1]); + fsdataset.invalidate(bpid, deleteBlocks); + checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0])); + checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1])); long sizeDeleted = blockIdToLen(1) + blockIdToLen(2); assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed()); assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining()); - - // Now make sure the rest of the blocks are valid for (int i=3; i <= NUMBLOCKS; ++i) { Block b = new Block(i, 0, 0); - assertTrue(fsdataset.isValidBlock(b)); + assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b))); } } - + + private SimulatedFSDataset getSimulatedFSDataset() throws IOException { + SimulatedFSDataset fsdataset = new 
SimulatedFSDataset(conf); + fsdataset.addBlockPool(bpid, conf); + return fsdataset; + } } Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java Fri Apr 29 18:16:32 2011 @@ -29,11 +29,12 @@ import org.apache.hadoop.hdfs.DFSTestUti import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.log4j.Level; import org.junit.Assert; import org.junit.Test; @@ -49,14 +50,14 @@ public class TestTransferRbw { private static final Random RAN = new Random(); private static final short REPLICATION = (short)1; - private static ReplicaBeingWritten getRbw(final DataNode datanode - ) throws InterruptedException { - return (ReplicaBeingWritten)getReplica(datanode, ReplicaState.RBW); + private static ReplicaBeingWritten getRbw(final DataNode datanode, + String bpid) throws InterruptedException { + return (ReplicaBeingWritten)getReplica(datanode, bpid, ReplicaState.RBW); } private static ReplicaInPipeline getReplica(final DataNode datanode, - final ReplicaState expectedState) throws InterruptedException { + final String bpid, final ReplicaState expectedState) throws InterruptedException { final FSDataset dataset = ((FSDataset)datanode.data); - final Collection replicas = dataset.volumeMap.replicas(); + final Collection replicas = dataset.volumeMap.replicas(bpid); for(int i = 0; i < 5 && replicas.size() == 0; i++) { LOG.info("wait since replicas.size() == 0; i=" + i); Thread.sleep(1000); @@ -94,9 +95,10 @@ public class TestTransferRbw { final ReplicaBeingWritten oldrbw; final DataNode newnode; final DatanodeInfo newnodeinfo; + final String bpid = cluster.getNamesystem().getBlockPoolId(); { final DataNode oldnode = cluster.getDataNodes().get(0); - oldrbw = getRbw(oldnode); + oldrbw = getRbw(oldnode, bpid); LOG.info("oldrbw = " + oldrbw); //add a datanode @@ -109,15 +111,15 @@ public class TestTransferRbw { ).getDatanodeReport(DatanodeReportType.LIVE); Assert.assertEquals(2, datatnodeinfos.length); int i = 0; - for(; i < datatnodeinfos.length - && !datatnodeinfos[i].equals(newnode.dnRegistration); i++); + for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid); + i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++); Assert.assertTrue(i < datatnodeinfos.length); newnodeinfo = datatnodeinfos[i]; oldnodeinfo = datatnodeinfos[1 - i]; } //transfer RBW - final Block b = new Block(oldrbw.getBlockId(), oldrbw.getBytesAcked(), + final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(), oldrbw.getGenerationStamp()); final 
DataTransferProtocol.Status s = DFSTestUtil.transferRbw( b, fs.getClient(), oldnodeinfo, newnodeinfo); @@ -125,7 +127,7 @@ public class TestTransferRbw { } //check new rbw - final ReplicaBeingWritten newrbw = getRbw(newnode); + final ReplicaBeingWritten newrbw = getRbw(newnode, bpid); LOG.info("newrbw = " + newrbw); Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId()); Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp()); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Fri Apr 29 18:16:32 2011 @@ -19,23 +19,17 @@ package org.apache.hadoop.hdfs.server.da import java.io.IOException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; - import org.junit.Assert; import org.junit.Test; /** Test if FSDataset#append, writeToRbw, and writeToTmp */ public class TestWriteToReplica { - final private static Block[] blocks = new Block[] { - new Block(1, 1, 2001), new Block(2, 1, 2002), - new Block(3, 1, 2003), new Block(4, 1, 2004), - new Block(5, 1, 2005), new Block(6, 1, 2006) - }; + final private static int FINALIZED = 0; final private static int TEMPORARY = 1; final private static int RBW = 2; @@ -47,16 +41,19 @@ public class TestWriteToReplica { @Test public void testClose() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build(); + try { cluster.waitActive(); DataNode dn = cluster.getDataNodes().get(0); FSDataset dataSet = (FSDataset)dn.data; // set up replicasMap - setup(dataSet); + String bpid = cluster.getNamesystem().getBlockPoolId(); + + ExtendedBlock[] blocks = setup(bpid, dataSet); // test close - testClose(dataSet); + testClose(dataSet, blocks); } finally { cluster.shutdown(); } @@ -72,10 +69,11 @@ public class TestWriteToReplica { FSDataset dataSet = (FSDataset)dn.data; // set up replicasMap - setup(dataSet); + String bpid = cluster.getNamesystem().getBlockPoolId(); + ExtendedBlock[] blocks = setup(bpid, dataSet); // test append - testAppend(dataSet); + testAppend(bpid, dataSet, blocks); } finally { cluster.shutdown(); } @@ -91,10 +89,11 @@ public class TestWriteToReplica { FSDataset dataSet = (FSDataset)dn.data; // set up replicasMap - setup(dataSet); + String bpid = cluster.getNamesystem().getBlockPoolId(); + ExtendedBlock[] blocks = setup(bpid, dataSet); // test writeToRbw - testWriteToRbw(dataSet); + testWriteToRbw(dataSet, blocks); } finally { cluster.shutdown(); } @@ -110,49 +109,70 @@ public class TestWriteToReplica { FSDataset dataSet = (FSDataset)dn.data; // set up replicasMap - setup(dataSet); + String bpid = cluster.getNamesystem().getBlockPoolId(); + ExtendedBlock[] blocks = setup(bpid, dataSet); // test writeToTemporary - testWriteToTemporary(dataSet); + 
testWriteToTemporary(dataSet, blocks); } finally { cluster.shutdown(); } } - private void setup(FSDataset dataSet) throws IOException { + /** + * Generate testing environment and return a collection of blocks + * on which to run the tests. + * + * @param bpid Block pool ID to generate blocks for + * @param dataSet Namespace in which to insert blocks + * @return Contrived blocks for further testing. + * @throws IOException + */ + private ExtendedBlock[] setup(String bpid, FSDataset dataSet) throws IOException { // setup replicas map + + ExtendedBlock[] blocks = new ExtendedBlock[] { + new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002), + new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004), + new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006) + }; + ReplicasMap replicasMap = dataSet.volumeMap; FSVolume vol = dataSet.volumes.getNextVolume(0); ReplicaInfo replicaInfo = new FinalizedReplica( - blocks[FINALIZED], vol, vol.getDir()); - replicasMap.add(replicaInfo); + blocks[FINALIZED].getLocalBlock(), vol, vol.getDir()); + replicasMap.add(bpid, replicaInfo); replicaInfo.getBlockFile().createNewFile(); replicaInfo.getMetaFile().createNewFile(); - replicasMap.add(new ReplicaInPipeline( + replicasMap.add(bpid, new ReplicaInPipeline( blocks[TEMPORARY].getBlockId(), blocks[TEMPORARY].getGenerationStamp(), vol, - vol.createTmpFile(blocks[TEMPORARY]).getParentFile())); + vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile())); - replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol, - vol.createRbwFile(blocks[RBW]).getParentFile(), null); - replicasMap.add(replicaInfo); + replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol, + vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null); + replicasMap.add(bpid, replicaInfo); replicaInfo.getBlockFile().createNewFile(); replicaInfo.getMetaFile().createNewFile(); - replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol, - vol.createRbwFile(blocks[RWR]).getParentFile())); - replicasMap.add(new ReplicaUnderRecovery( - new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007)); + replicasMap.add(bpid, new ReplicaWaitingToBeRecovered( + blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid, + blocks[RWR].getLocalBlock()).getParentFile())); + replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR] + .getLocalBlock(), vol, vol.getDir()), 2007)); + + return blocks; } - private void testAppend(FSDataset dataSet) throws IOException { + private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException { long newGS = blocks[FINALIZED].getGenerationStamp()+1; - FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED]).getVolume(); + FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock()) + .getVolume(); long available = v.getCapacity()-v.getDfsUsed(); long expectedLen = blocks[FINALIZED].getNumBytes(); try { - v.decDfsUsed(-available); + v.decDfsUsed(bpid, -available); blocks[FINALIZED].setNumBytes(expectedLen+100); dataSet.append(blocks[FINALIZED], newGS, expectedLen); Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]); @@ -160,7 +180,7 @@ public class TestWriteToReplica { Assert.assertTrue(e.getMessage().startsWith( "Insufficient space for appending to ")); } - v.decDfsUsed(available); + v.decDfsUsed(bpid, available); blocks[FINALIZED].setNumBytes(expectedLen); newGS = blocks[RBW].getGenerationStamp()+1; @@ -265,7 +285,7 @@ public 
class TestWriteToReplica { } } - private void testClose(FSDataset dataSet) throws IOException { + private void testClose(FSDataset dataSet, ExtendedBlock [] blocks) throws IOException { long newGS = blocks[FINALIZED].getGenerationStamp()+1; dataSet.recoverClose(blocks[FINALIZED], newGS, blocks[FINALIZED].getNumBytes()); // successful @@ -315,7 +335,7 @@ public class TestWriteToReplica { } } - private void testWriteToRbw(FSDataset dataSet) throws IOException { + private void testWriteToRbw(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException { try { dataSet.recoverRbw(blocks[FINALIZED], blocks[FINALIZED].getGenerationStamp()+1, @@ -408,7 +428,7 @@ public class TestWriteToReplica { dataSet.createRbw(blocks[NON_EXISTENT]); } - private void testWriteToTemporary(FSDataset dataSet) throws IOException { + private void testWriteToTemporary(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException { try { dataSet.createTemporary(blocks[FINALIZED]); Assert.fail("Should not have created a temporary replica that was " + Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Apr 29 18:16:32 2011 @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.F import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -74,7 +75,7 @@ import org.apache.log4j.LogManager; *
 * <li>-logLevel L specifies the logging level when the benchmark runs.
 * The default logging level is {@link Level#ERROR}.</li>
 * <li>-UGCacheRefreshCount G will cause the benchmark to call
- * {@link NameNode#refreshUserToGroupsMappings(Configuration)} after
+ * {@link NameNode#refreshUserToGroupsMappings()} after
 * every G operations, which purges the name-node's user group cache.
 * By default the refresh is never called.</li>
 * <li>-keepResults do not clean up the name-space after execution.</li>
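
The javadoc above documents the benchmark's general command-line switches. Below is a minimal sketch of a driver that exercises them; it assumes only that NNThroughputBenchmark exposes a public main(String[]) entry point and a -op selector, and the flag spellings are taken from this javadoc fragment rather than verified against trunk.

import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

public class BenchmarkDriver {
  public static void main(String[] args) throws Exception {
    // Assumed invocation: "-op" picks the operation to benchmark; the
    // remaining flags are the general options described in the javadoc above.
    String[] benchArgs = {
        "-op", "create",               // hypothetical choice of operation
        "-logLevel", "INFO",           // override the default ERROR level
        "-UGCacheRefreshCount", "100", // purge the user-to-groups cache every 100 ops
        "-keepResults"                 // leave the generated namespace in place
    };
    NNThroughputBenchmark.main(benchArgs);
  }
}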
  • @@ -798,8 +799,9 @@ public class NNThroughputBenchmark { */ void sendHeartbeat() throws IOException { // register datanode - DatanodeCommand[] cmds = nameNode.sendHeartbeat( - dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0); + // TODO:FEDERATION currently a single block pool is supported + DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration, + DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0); if(cmds != null) { for (DatanodeCommand cmd : cmds ) { if(LOG.isDebugEnabled()) { @@ -842,8 +844,9 @@ public class NNThroughputBenchmark { @SuppressWarnings("unused") // keep it for future blockReceived benchmark int replicateBlocks() throws IOException { // register datanode - DatanodeCommand[] cmds = nameNode.sendHeartbeat( - dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0); + // TODO:FEDERATION currently a single block pool is supported + DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration, + DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0); if (cmds != null) { for (DatanodeCommand cmd : cmds) { if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) { @@ -874,6 +877,7 @@ public class NNThroughputBenchmark { new DataStorage(nsInfo, dnInfo.getStorageID())); receivedDNReg.setInfoPort(dnInfo.getInfoPort()); nameNode.blockReceived( receivedDNReg, + nameNode.getNamesystem().getBlockPoolId(), new Block[] {blocks[i]}, new String[] {DataNode.EMPTY_DEL_HINT}); } @@ -969,7 +973,7 @@ public class NNThroughputBenchmark { nameNode.create(fileName, FsPermission.getDefault(), clientName, new EnumSetWritable(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE); - Block lastBlock = addBlocks(fileName, clientName); + ExtendedBlock lastBlock = addBlocks(fileName, clientName); nameNode.complete(fileName, clientName, lastBlock); } // prepare block reports @@ -978,18 +982,19 @@ public class NNThroughputBenchmark { } } - private Block addBlocks(String fileName, String clientName) + private ExtendedBlock addBlocks(String fileName, String clientName) throws IOException { - Block prevBlock = null; + ExtendedBlock prevBlock = null; for(int jdx = 0; jdx < blocksPerFile; jdx++) { LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock, null); prevBlock = loc.getBlock(); for(DatanodeInfo dnInfo : loc.getLocations()) { int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName()); - datanodes[dnIdx].addBlock(loc.getBlock()); + datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock()); nameNode.blockReceived( datanodes[dnIdx].dnRegistration, - new Block[] {loc.getBlock()}, + loc.getBlock().getBlockPoolId(), + new Block[] {loc.getBlock().getLocalBlock()}, new String[] {""}); } } @@ -1007,7 +1012,8 @@ public class NNThroughputBenchmark { assert daemonId < numThreads : "Wrong daemonId."; TinyDatanode dn = datanodes[daemonId]; long start = System.currentTimeMillis(); - nameNode.blockReport(dn.dnRegistration, dn.getBlockReportList()); + nameNode.blockReport(dn.dnRegistration, nameNode.getNamesystem() + .getBlockPoolId(), dn.getBlockReportList()); long end = System.currentTimeMillis(); return end-start; } Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- 
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Apr 29 18:16:32 2011 @@ -140,7 +140,7 @@ public class OfflineEditsViewerHelper { // no check, if it's not it throws an exception which is what we want DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem(); - FileContext fc = FileContext.getFileContext(cluster.getURI(), config); + FileContext fc = FileContext.getFileContext(cluster.getURI(0), config); // OP_ADD 0, OP_SET_GENSTAMP 10 Path pathFileCreate = new Path("/file_create"); FSDataOutputStream s = dfs.create(pathFileCreate); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Apr 29 18:16:32 2011 @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.TestHDFSServerPorts; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -38,8 +37,6 @@ import junit.framework.TestCase; public class TestBackupNode extends TestCase { public static final Log LOG = LogFactory.getLog(TestBackupNode.class); - // reset default 0.0.0.0 addresses in order to avoid IPv6 problem - static final String THIS_HOST = TestHDFSServerPorts.getFullHostName(); static final String BASE_DIR = MiniDFSCluster.getBaseDirectory(); protected void setUp() throws Exception { @@ -138,10 +135,6 @@ public class TestBackupNode extends Test // // Take a checkpoint // - conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, - THIS_HOST + ":0"); - conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, - THIS_HOST + ":0"); backup = startBackupNode(conf, op, 1); waitCheckpointDone(backup); } catch(IOException e) { @@ -224,17 +217,13 @@ public class TestBackupNode extends Test try { // start name-node and backup node 1 cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build(); - conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, - THIS_HOST + ":7771"); - conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, - THIS_HOST + ":7775"); + conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771"); + conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775"); backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1); // try to start backup node 2 conf2 = new HdfsConfiguration(conf1); - conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, - THIS_HOST + ":7772"); - conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, - THIS_HOST + ":7776"); + conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7772"); + conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7776"); try { backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2); backup2.stop(); Modified: 
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Fri Apr 29 18:16:32 2011 @@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.*; import org.apache.hadoop.hdfs.server.balancer.TestBalancer; @@ -122,7 +122,7 @@ public class TestBlockTokenWithDFS exten InetSocketAddress targetAddr = null; Socket s = null; BlockReader blockReader = null; - Block block = lblock.getBlock(); + ExtendedBlock block = lblock.getBlock(); try { DatanodeInfo[] nodes = lblock.getLocations(); targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); @@ -130,7 +130,8 @@ public class TestBlockTokenWithDFS exten s.connect(targetAddr, HdfsConstants.READ_TIMEOUT); s.setSoTimeout(HdfsConstants.READ_TIMEOUT); - String file = BlockReader.getFileName(targetAddr, block.getBlockId()); + String file = BlockReader.getFileName(targetAddr, + "test-blockpoolid", block.getBlockId()); blockReader = BlockReader.newBlockReader(s, file, block, lblock.getBlockToken(), 0, -1, conf.getInt("io.file.buffer.size", 4096)); @@ -351,7 +352,8 @@ public class TestBlockTokenWithDFS exten // read should succeed tryRead(conf, lblock, true); // use a token with wrong blockID - Block wrongBlock = new Block(lblock.getBlock().getBlockId() + 1); + ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock() + .getBlockPoolId(), lblock.getBlock().getBlockId() + 1); lblock.setBlockToken(cluster.getNameNode().getNamesystem() .blockTokenSecretManager.generateToken(wrongBlock, EnumSet.of(BlockTokenSecretManager.AccessMode.READ))); @@ -413,7 +415,7 @@ public class TestBlockTokenWithDFS exten assertTrue(cluster.restartDataNodes(true)); cluster.waitActive(); assertEquals(numDataNodes, cluster.getDataNodes().size()); - cluster.shutdownNameNode(); + cluster.shutdownNameNode(0); // confirm tokens cached in in1 are still valid lblocks = DFSTestUtil.getAllBlocks(in1); @@ -449,8 +451,8 @@ public class TestBlockTokenWithDFS exten */ // restart the namenode and then shut it down for test - cluster.restartNameNode(); - cluster.shutdownNameNode(); + cluster.restartNameNode(0); + cluster.shutdownNameNode(0); // verify blockSeekTo() still works (forced to use cached tokens) in1.seek(0); @@ -469,13 +471,13 @@ public class TestBlockTokenWithDFS exten */ // restore the cluster and restart the datanodes for test - cluster.restartNameNode(); + cluster.restartNameNode(0); assertTrue(cluster.restartDataNodes(true)); cluster.waitActive(); assertEquals(numDataNodes, cluster.getDataNodes().size()); // shutdown namenode so that DFSClient can't get new tokens from namenode - cluster.shutdownNameNode(); + cluster.shutdownNameNode(0); // verify blockSeekTo() fails (cached tokens become 
invalid) in1.seek(0); @@ -484,7 +486,7 @@ public class TestBlockTokenWithDFS exten assertFalse(checkFile2(in3)); // restart the namenode to allow DFSClient to re-fetch tokens - cluster.restartNameNode(); + cluster.restartNameNode(0); // verify blockSeekTo() works again (by transparently re-fetching // tokens from namenode) in1.seek(0); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Fri Apr 29 18:16:32 2011 @@ -163,7 +163,7 @@ public class TestBlockUnderConstruction final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len); final List blocks = lb.getLocatedBlocks(); assertEquals(i, blocks.size()); - final Block b = blocks.get(blocks.size() - 1).getBlock(); + final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock(); assertTrue(b instanceof BlockInfoUnderConstruction); if (++i < NUM_BLOCKS) { Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Fri Apr 29 18:16:32 2011 @@ -28,7 +28,8 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -92,7 +93,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block with a replication factor of 3 final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1); // Add a new datanode on a different rack @@ -126,7 +127,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block with a replication factor of 1 final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0); REPLICATION_FACTOR = 2; @@ -160,7 +161,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block final FileSystem fs = cluster.getFileSystem(); 
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1); // Add new datanodes on a different rack and increase the @@ -200,12 +201,12 @@ public class TestBlocksWithNotEnoughRack DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L); final String fileContent = DFSTestUtil.readFile(fs, filePath); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Corrupt a replica of the block int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b); - assertTrue(cluster.corruptReplica(b.getBlockName(), dnToCorrupt)); + assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt, b)); // Restart the datanode so blocks are re-scanned, and the corrupt // block is detected. @@ -220,7 +221,7 @@ public class TestBlocksWithNotEnoughRack // Ensure all replicas are valid (the corrupt replica may not // have been cleaned up yet). for (int i = 0; i < racks.length; i++) { - String blockContent = cluster.readBlockOnDataNode(i, b.getBlockName()); + String blockContent = cluster.readBlockOnDataNode(i, b); if (blockContent != null && i != dnToCorrupt) { assertEquals("Corrupt replica", fileContent, blockContent); } @@ -248,7 +249,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Decrease the replication factor, make sure the deleted replica @@ -282,7 +283,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block with a replication factor of 2 final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Make the last datanode look like it failed to heartbeat by @@ -290,8 +291,9 @@ public class TestBlocksWithNotEnoughRack ArrayList datanodes = cluster.getDataNodes(); int idx = datanodes.size() - 1; DataNode dataNode = datanodes.get(idx); + DatanodeID dnId = dataNode.getDatanodeId(); cluster.stopDataNode(idx); - ns.removeDatanode(dataNode.dnRegistration); + ns.removeDatanode(dnId); // The block should still have sufficient # replicas, across racks. 
// The last node may not have contained a replica, but if it did @@ -303,8 +305,9 @@ public class TestBlocksWithNotEnoughRack datanodes = cluster.getDataNodes(); idx = datanodes.size() - 1; dataNode = datanodes.get(idx); + dnId = dataNode.getDatanodeId(); cluster.stopDataNode(idx); - ns.removeDatanode(dataNode.dnRegistration); + ns.removeDatanode(dnId); // Make sure we have enough live replicas even though we are // short one rack and therefore need one replica @@ -334,7 +337,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Make the last (cross rack) datanode look like it failed @@ -342,8 +345,9 @@ public class TestBlocksWithNotEnoughRack ArrayList datanodes = cluster.getDataNodes(); assertEquals(3, datanodes.size()); DataNode dataNode = datanodes.get(2); + DatanodeID dnId = dataNode.getDatanodeId(); cluster.stopDataNode(2); - ns.removeDatanode(dataNode.dnRegistration); + ns.removeDatanode(dnId); // The block gets re-replicated to another datanode so it has a // sufficient # replicas, but not across racks, so there should @@ -394,7 +398,7 @@ public class TestBlocksWithNotEnoughRack // Create a file with one block final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Decommission one of the hosts with the block, this should cause @@ -443,7 +447,7 @@ public class TestBlocksWithNotEnoughRack try { final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L); - Block b = DFSTestUtil.getFirstBlock(fs, filePath); + ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath); DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0); // Lower the replication factor so the blocks are over replicated @@ -472,4 +476,4 @@ public class TestBlocksWithNotEnoughRack cluster.shutdown(); } } -} \ No newline at end of file +} Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Fri Apr 29 18:16:32 2011 @@ -95,7 +95,7 @@ public class TestCheckPointForSecurityTo String[] args = new String[]{"-saveNamespace"}; // verify that the edits file is NOT empty - Collection editsDirs = cluster.getNameEditsDirs(); + Collection editsDirs = cluster.getNameEditsDirs(0); for(URI uri : editsDirs) { File ed = new File(uri.getPath()); Assert.assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java URL: 
http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Apr 29 18:16:32 2011 @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na import junit.framework.TestCase; import java.io.*; +import java.net.InetSocketAddress; import java.net.URI; import java.util.Collection; import java.util.List; @@ -26,6 +27,7 @@ import java.util.Iterator; import java.util.Random; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -656,7 +658,7 @@ public class TestCheckpoint extends Test // assertTrue(!fileSys.exists(file1)); assertTrue(!fileSys.exists(file2)); - namedirs = cluster.getNameDirs(); + namedirs = cluster.getNameDirs(0); // // Create file1 @@ -743,7 +745,7 @@ public class TestCheckpoint extends Test cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build(); cluster.waitActive(); fs = (DistributedFileSystem)(cluster.getFileSystem()); - fc = FileContext.getFileContext(cluster.getURI()); + fc = FileContext.getFileContext(cluster.getURI(0)); // Saving image without safe mode should fail DFSAdmin admin = new DFSAdmin(conf); @@ -766,7 +768,7 @@ public class TestCheckpoint extends Test assertTrue(fc.getFileLinkStatus(symlink).isSymlink()); // verify that the edits file is NOT empty - Collection editsDirs = cluster.getNameEditsDirs(); + Collection editsDirs = cluster.getNameEditsDirs(0); for(URI uri : editsDirs) { File ed = new File(uri.getPath()); assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE); @@ -793,7 +795,7 @@ public class TestCheckpoint extends Test cluster.waitActive(); fs = (DistributedFileSystem)(cluster.getFileSystem()); checkFile(fs, file, replication); - fc = FileContext.getFileContext(cluster.getURI()); + fc = FileContext.getFileContext(cluster.getURI(0)); assertTrue(fc.getFileLinkStatus(symlink).isSymlink()); } finally { if(fs != null) fs.close(); @@ -801,6 +803,85 @@ public class TestCheckpoint extends Test } } + /* Test case to test CheckpointSignature */ + @SuppressWarnings("deprecation") + public void testCheckpointSignature() throws IOException { + + MiniDFSCluster cluster = null; + Configuration conf = new HdfsConfiguration(); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .format(false).build(); + NameNode nn = cluster.getNameNode(); + + SecondaryNameNode secondary = startSecondaryNameNode(conf); + // prepare checkpoint image + secondary.doCheckpoint(); + CheckpointSignature sig = nn.rollEditLog(); + // manipulate the CheckpointSignature fields + sig.setBlockpoolID("somerandomebpid"); + sig.clusterID = "somerandomcid"; + try { + sig.validateStorageInfo(nn.getFSImage()); // this should fail + assertTrue("This test is expected to fail.", false); + } catch (Exception ignored) { + } + + secondary.shutdown(); + cluster.shutdown(); + } + + /** + * Starts two namenodes and two secondary namenodes, verifies that secondary + * namenodes are configured correctly to talk to their respective namenodes + * and can do the checkpoint. 
+ * + * @throws IOException + */ + @SuppressWarnings("deprecation") + public void testMultipleSecondaryNamenodes() throws IOException { + Configuration conf = new HdfsConfiguration(); + String nameserviceId1 = "ns1"; + String nameserviceId2 = "ns2"; + conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1 + + "," + nameserviceId2); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2) + .nameNodePort(9928).build(); + Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0)); + Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1)); + InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).rpcAddress; + InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).rpcAddress; + String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort(); + String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort(); + + // Set the Service Rpc address to empty to make sure the node specific + // setting works + snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, ""); + snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, ""); + + // Set the nameserviceIds + snConf1.set(DFSUtil.getNameServiceIdKey( + DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1); + snConf2.set(DFSUtil.getNameServiceIdKey( + DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2); + + SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1); + SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2); + + // make sure the two secondary namenodes are talking to correct namenodes. + assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort()); + assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort()); + assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2 + .getNameNodeAddress().getPort()); + + // both should checkpoint. + secondary1.doCheckpoint(); + secondary2.doCheckpoint(); + secondary1.shutdown(); + secondary2.shutdown(); + cluster.shutdown(); + } + /** * Simulate a secondary node failure to transfer image * back to the name-node. 
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Fri Apr 29 18:16:32 2011 @@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.junit.Test; /** A JUnit test for corrupt_files.jsp */ @@ -80,9 +82,8 @@ public class TestCorruptFilesJsp { // Now corrupt all the files except for the last one for (int idx = 0; idx < filepaths.length - 1; idx++) { - String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx]) - .getBlockName(); - assertTrue(cluster.corruptReplica(blockName, 0)); + ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepaths[idx]); + assertTrue(TestDatanodeBlockScanner.corruptReplica(blk, 0)); // read the file so that the corrupt block is reported to NN FSDataInputStream in = fs.open(filepaths[idx]); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java Fri Apr 29 18:16:32 2011 @@ -21,7 +21,6 @@ import java.util.ArrayList; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.GenerationStamp; -import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import junit.framework.TestCase; @@ -43,10 +42,10 @@ public class TestDatanodeDescriptor exte blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP)); } dd.addBlocksToBeInvalidated(blockList); - BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT); - assertEquals(bc.getBlocks().length, MAX_LIMIT); + Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT); + assertEquals(bc.length, MAX_LIMIT); bc = dd.getInvalidateBlocks(MAX_LIMIT); - assertEquals(bc.getBlocks().length, REMAINING_BLOCKS); + assertEquals(bc.length, REMAINING_BLOCKS); } public void testBlocksCounter() throws Exception { Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Apr 29 18:16:32 2011 @@ -30,6 +30,7 @@ import 
org.apache.hadoop.hdfs.HdfsConfig import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; @@ -86,10 +87,12 @@ public class TestDeadDatanode { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); + String poolId = cluster.getNamesystem().getBlockPoolId(); // wait for datanode to be marked live DataNode dn = cluster.getDataNodes().get(0); - DatanodeRegistration reg = cluster.getDataNodes().get(0) - .getDatanodeRegistration(); + DatanodeRegistration reg = + DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId); + waitForDatanodeState(reg.getStorageID(), true, 20000); // Shutdown and wait for datanode to be marked dead @@ -97,13 +100,13 @@ public class TestDeadDatanode { waitForDatanodeState(reg.getStorageID(), false, 20000); DatanodeProtocol dnp = cluster.getNameNode(); - Block block = new Block(0); - Block[] blocks = new Block[] { block }; + + Block[] blocks = new Block[] { new Block(0) }; String[] delHints = new String[] { "" }; // Ensure blockReceived call from dead datanode is rejected with IOException try { - dnp.blockReceived(reg, blocks, delHints); + dnp.blockReceived(reg, poolId, blocks, delHints); Assert.fail("Expected IOException is not thrown"); } catch (IOException ex) { // Expected @@ -112,7 +115,7 @@ public class TestDeadDatanode { // Ensure blockReport from dead datanode is rejected with IOException long[] blockReport = new long[] { 0L, 0L, 0L }; try { - dnp.blockReport(reg, blockReport); + dnp.blockReport(reg, poolId, blockReport); Assert.fail("Expected IOException is not thrown"); } catch (IOException ex) { // Expected @@ -120,7 +123,7 @@ public class TestDeadDatanode { // Ensure heartbeat from dead datanode is rejected with a command // that asks datanode to register again - DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0); + DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0); Assert.assertEquals(1, cmd.length); Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER .getAction()); Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1097905&r1=1097904&r2=1097905&view=diff ============================================================================== --- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original) +++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Apr 29 18:16:32 2011 @@ -111,7 +111,7 @@ public class TestEditLog extends TestCas fileSys = cluster.getFileSystem(); final FSNamesystem namesystem = cluster.getNamesystem(); - for (Iterator it = cluster.getNameDirs().iterator(); it.hasNext(); ) { + for (Iterator it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) { File dir = new File(it.next().getPath()); System.out.println(dir); } Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java URL: 
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Fri Apr 29 18:16:32 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.test.GenericTestUtils;
 import static org.junit.Assert.*;
 import org.junit.Test;
@@ -319,7 +320,7 @@ public class TestEditLogRace {
   public void testSaveImageWhileSyncInProgress() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    NameNode.format(conf);
+    GenericTestUtils.formatNamenode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
     try {
@@ -409,7 +410,7 @@ public class TestEditLogRace {
   public void testSaveRightBeforeSync() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    NameNode.format(conf);
+    GenericTestUtils.formatNamenode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
     try {
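Both TestEditLogRace hunks swap NameNode.format(conf) for a shared test helper. A minimal sketch, assuming GenericTestUtils.formatNamenode(conf) behaves as the replacement call used here and that conf points at the name directories to be formatted; everything else is illustrative:

// Sketch only: the helper name is taken from this diff, the configuration
// below is a bare default and only stands in for a test's getConf().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.test.GenericTestUtils;

public class FormatNamenodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Tests no longer call NameNode.format(conf) directly; the shared helper
    // formats whatever name directories conf points at.
    GenericTestUtils.formatNamenode(conf);
  }
}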
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Apr 29 18:16:32 2011
@@ -28,7 +28,6 @@ import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
 import java.util.Random;
 import java.util.regex.Pattern;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -247,15 +247,11 @@ public class TestFsck extends TestCase {
     String[] fileNames = util.getFileNames(topDir);
     DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
-    String block = dfsClient.getNamenode().
-        getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
-        get(0).getBlock().getBlockName();
-    File baseDir = new File(System.getProperty("test.build.data",
-        "build/test/data"),"dfs/data");
-    for (int i=0; i<8; i++) {
-      File blockFile = new File(baseDir, "data" +(i+1) +
-          MiniDFSCluster.FINALIZED_DIR_NAME + block);
-      if(blockFile.exists()) {
+    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
+        fileNames[0], 0, Long.MAX_VALUE).get(0).getBlock();
+    for (int i=0; i<4; i++) {
+      File blockFile = MiniDFSCluster.getBlockFile(i, block);
+      if(blockFile != null && blockFile.exists()) {
         assertTrue(blockFile.delete());
       }
     }
@@ -355,7 +351,7 @@ public class TestFsck extends TestCase {
     DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
     // Wait until file replication has completed
     DFSTestUtil.waitReplication(fs, file1, (short)3);
-    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
     // Make sure filesystem is in healthy state
     outStr = runFsck(conf, 0, true, "/");
@@ -363,12 +359,9 @@
     assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     // corrupt replicas
-    File baseDir = new File(System.getProperty("test.build.data",
-        "build/test/data"),"dfs/data");
-    for (int i=0; i < 6; i++) {
-      File blockFile = new File(baseDir, "data" + (i+1) +
-          MiniDFSCluster.FINALIZED_DIR_NAME + block);
-      if (blockFile.exists()) {
+    for (int i=0; i < 3; i++) {
+      File blockFile = MiniDFSCluster.getBlockFile(i, block);
+      if (blockFile != null && blockFile.exists()) {
         RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
         FileChannel channel = raFile.getChannel();
         String badString = "BADBAD";
@@ -470,19 +463,21 @@ public class TestFsck extends TestCase {
     System.out.println("1. good fsck out: " + outStr);
     assertTrue(outStr.contains("has 0 CORRUPT files"));
     // delete the blocks
-    File baseDir = new File(System.getProperty("test.build.data",
-        "build/test/data"),"dfs/data");
-    for (int i=0; i<8; i++) {
-      File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
-      File[] blocks = data_dir.listFiles();
-      if (blocks == null)
-        continue;
-
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (!blocks[idx].getName().startsWith("blk_")) {
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    for (int i=0; i<4; i++) {
+      for (int j=0; j<=1; j++) {
+        File storageDir = MiniDFSCluster.getStorageDir(i, j);
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        File[] blocks = data_dir.listFiles();
+        if (blocks == null) continue;
+
+        for (int idx = 0; idx < blocks.length; idx++) {
+          if (!blocks[idx].getName().startsWith("blk_")) {
+            continue;
+          }
+          assertTrue("Cannot remove file.", blocks[idx].delete());
        }
-        assertTrue("Cannot remove file.", blocks[idx].delete());
      }
    }
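The TestFsck hunks stop hand-building "data1/.../finalized" paths and instead resolve replica files through MiniDFSCluster. A sketch of those lookups, assuming the static getBlockFile, getStorageDir, and getFinalizedDir helpers with the parameters shown above; the four-datanode, two-directory layout simply mirrors the loops in the hunks:

// Sketch only: locate finalized replica files per datanode and block pool.
import java.io.File;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class BlockFileLookupSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(4).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketch/fsck");
      DFSTestUtil.createFile(fs, file, 1024, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, file, (short) 3);
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
      String bpid = cluster.getNamesystem().getBlockPoolId();
      for (int dn = 0; dn < 4; dn++) {
        // Per-datanode lookup of the replica file for this block.
        File blockFile = MiniDFSCluster.getBlockFile(dn, block);
        // Or walk each storage directory's finalized dir for the block pool.
        for (int dir = 0; dir <= 1; dir++) {
          File finalized = MiniDFSCluster.getFinalizedDir(
              MiniDFSCluster.getStorageDir(dn, dir), bpid);
          System.out.println(blockFile + " / " + finalized);
        }
      }
    } finally {
      cluster.shutdown();
    }
  }
}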
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Fri Apr 29 18:16:32 2011
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
+import java.io.IOException;
 import java.util.ArrayList;
 import junit.framework.TestCase;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -37,9 +39,10 @@ import org.apache.hadoop.hdfs.server.pro
  */
 public class TestHeartbeatHandling extends TestCase {
   /**
-   * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
-   * can pick up replication and/or invalidate requests and
-   * observes the max limit
+   * Test if
+   * {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, long, int, int)}
+   * can pick up replication and/or invalidate requests and observes the max
+   * limit
    */
   public void testHeartbeat() throws Exception {
     final Configuration conf = new HdfsConfiguration();
@@ -47,11 +50,15 @@
     try {
       cluster.waitActive();
       final FSNamesystem namesystem = cluster.getNamesystem();
-      final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0).dnRegistration;
+      final String poolId = namesystem.getBlockPoolId();
+      final DatanodeRegistration nodeReg =
+        DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
+
       DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
       final int REMAINING_BLOCKS = 1;
-      final int MAX_REPLICATE_LIMIT = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
+      final int MAX_REPLICATE_LIMIT =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
       final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
       final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
       final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
@@ -64,43 +71,37 @@ public class TestHeartbeatHandling exten
           dd.addBlockToBeReplicated(
               new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
         }
-
-        DatanodeCommand[] cmds = namesystem.handleHeartbeat(
-            nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+        DatanodeCommand[]cmds = sendHeartBeat(nodeReg, dd, namesystem);
         assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
-
+
        ArrayList blockList = new ArrayList(MAX_INVALIDATE_BLOCKS);
        for (int i=0; i 0));
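The TestHeartbeatHandling hunk above queues work on a DatanodeDescriptor looked up through the block-pool-aware registration, and the TestDatanodeDescriptor hunk earlier shows getInvalidateBlocks handing back a plain Block[] instead of a BlockCommand. A sketch combining the two, assuming those signatures; it sits in the namenode package because, as with the tests, some of these accessors may not be visible from outside it, and the rest of the setup is illustrative:

// Sketch only: queue invalidation work on a DatanodeDescriptor and read it back.
package org.apache.hadoop.hdfs.server.namenode;

import java.util.ArrayList;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

public class InvalidateQueueSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    try {
      cluster.waitActive();
      FSNamesystem namesystem = cluster.getNamesystem();
      String poolId = namesystem.getBlockPoolId();
      DatanodeRegistration nodeReg = DataNodeTestUtils.getDNRegistrationForBP(
          cluster.getDataNodes().get(0), poolId);
      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
      ArrayList<Block> blockList = new ArrayList<Block>();
      blockList.add(new Block(1, 0, GenerationStamp.FIRST_VALID_STAMP));
      dd.addBlocksToBeInvalidated(blockList);
      // getInvalidateBlocks now returns the raw Block[] rather than a
      // BlockCommand wrapper (see the TestDatanodeDescriptor hunk above).
      Block[] toInvalidate = dd.getInvalidateBlocks(10);
      System.out.println("blocks queued for invalidation: " + toInvalidate.length);
    } finally {
      cluster.shutdown();
    }
  }
}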
@@ -162,8 +163,9 @@ public class TestListCorruptFileBlocks {
+          " corrupt files. Expecting None.", badFiles.size() == 0);
       // Now deliberately corrupt one block
-      File data_dir = new File(System.getProperty("test.build.data"),
-          "dfs/data/data1/current/finalized");
+      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
+          cluster.getNamesystem().getBlockPoolId());
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
@@ -207,7 +209,7 @@ public class TestListCorruptFileBlocks {
           badFiles.size() == 1);
       // restart namenode
-      cluster.restartNameNode();
+      cluster.restartNameNode(0);
       fs = cluster.getFileSystem();
       // wait until replication queues have been initialized
@@ -279,23 +281,24 @@ public class TestListCorruptFileBlocks {
       int numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-          "build/test/data"), "dfs/data");
-      for (int i = 0; i < 8; i++) {
-        File data_dir = new File(baseDir, "data" + (i + 1)
-            + MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
-        // (blocks.length > 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      for (int i = 0; i < 4; i++) {
+        for (int j = 0; j <= 1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null) continue;
+          // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
+          // (blocks.length > 0));
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            LOG.info("Deliberately removing file " + blocks[idx].getName());
+            assertTrue("Cannot remove file.", blocks[idx].delete());
+            // break;
           }
-          LOG.info("Deliberately removing file " + blocks[idx].getName());
-          assertTrue("Cannot remove file.", blocks[idx].delete());
-          // break;
         }
       }
@@ -380,17 +383,16 @@ public class TestListCorruptFileBlocks {
       DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
       util.createFiles(fs, "/corruptData");
-      final NameNode namenode = cluster.getNameNode();
       RemoteIterator corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
       int numCorrupt = countPaths(corruptFileBlocks);
       assertTrue(numCorrupt == 0);
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-          "build/test/data"), "dfs/data");
-      for (int i = 0; i < 8; i++) {
-        File data_dir = new File(baseDir, "data" + (i + 1)
-            + MiniDFSCluster.FINALIZED_DIR_NAME);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      // For loop through number of datadirectories per datanode (2)
+      for (int i = 0; i < 2; i++) {
+        File storageDir = MiniDFSCluster.getStorageDir(0, i);
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        File[] blocks = data_dir.listFiles();
        if (blocks == null)
          continue;
@@ -461,19 +463,22 @@ public class TestListCorruptFileBlocks {
           badFiles.size() == 0);
       // Now deliberately blocks from all files
-      File baseDir = new File(System.getProperty("test.build.data",
-          "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      for (int i=0; i<4; i++) {
+        for (int j=0; j<=1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          LOG.info("Removing files from " + data_dir);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null) continue;
+
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            assertTrue("Cannot remove file.", blocks[idx].delete());
          }
-          assertTrue("Cannot remove file.", blocks[idx].delete());
        }
      }
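The TestListCorruptFileBlocks hunks above exercise corrupt-file listing through the client API. A small sketch of that call path, assuming DistributedFileSystem.listCorruptFileBlocks(Path) yields a RemoteIterator of paths as the test's countPaths usage suggests; the path and cluster setup are illustrative:

// Sketch only: count corrupt files reported for a directory tree.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ListCorruptFileBlocksSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
      RemoteIterator<Path> corrupt = dfs.listCorruptFileBlocks(new Path("/"));
      int numCorrupt = 0;
      while (corrupt.hasNext()) {   // count the paths the iterator yields
        corrupt.next();
        numCorrupt++;
      }
      System.out.println("corrupt files under /: " + numCorrupt);
    } finally {
      cluster.shutdown();
    }
  }
}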
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Fri Apr 29 18:16:32 2011
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 public class TestNNThroughputBenchmark {
@@ -35,7 +36,7 @@ public class TestNNThroughputBenchmark {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    NameNode.format(conf);
+    GenericTestUtils.formatNamenode(conf);
     String[] args = new String[] {"-op", "all"};
     NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
   }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java Fri Apr 29 18:16:32 2011
@@ -56,8 +56,9 @@ public class TestNameNodeJspHelper {
   public void testDelegationToken() throws IOException, InterruptedException {
     NameNode nn = cluster.getNameNode();
     HttpServletRequest request = mock(HttpServletRequest.class);
-    String tokenString = NamenodeJspHelper
-        .getDelegationToken(nn, request, conf);
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
+    String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
+        conf, ugi);
     //tokenString returned must be null because security is disabled
     Assert.assertEquals(null, tokenString);
   }
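The TestNameNodeJspHelper hunk passes the caller's UserGroupInformation into getDelegationToken instead of letting the helper derive it. A sketch of that call, assuming the four-argument signature shown above and a Mockito-mocked servlet request as in the test; with security disabled the returned token is expected to be null:

// Sketch only: the UGI-aware delegation-token call from the hunk above.
import static org.mockito.Mockito.mock;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper;
import org.apache.hadoop.security.UserGroupInformation;

public class DelegationTokenJspSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      NameNode nn = cluster.getNameNode();
      HttpServletRequest request = mock(HttpServletRequest.class);
      // The caller's identity is now supplied explicitly.
      UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
      String token = NamenodeJspHelper.getDelegationToken(nn, request, conf, ugi);
      System.out.println("token (null when security is off): " + token);
    } finally {
      cluster.shutdown();
    }
  }
}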
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1097905&r1=1097904&r2=1097905&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Fri Apr 29 18:16:32 2011
@@ -46,6 +46,13 @@ public class TestNameNodeMXBean {
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+    // get attribute "ClusterId"
+    String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
+    Assert.assertEquals(fsn.getClusterId(), clusterId);
+    // get attribute "BlockPoolId"
+    String blockpoolId = (String) mbs.getAttribute(mxbeanName,
+        "BlockPoolId");
+    Assert.assertEquals(fsn.getBlockPoolId(), blockpoolId);
     // get attribute "Version"
     String version = (String) mbs.getAttribute(mxbeanName, "Version");
     Assert.assertEquals(fsn.getVersion(), version);
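The TestNameNodeMXBean hunk adds ClusterId and BlockPoolId to the NameNodeInfo MXBean. A short sketch that reads those attributes over standard JMX, using the object name and attribute names taken from this hunk; the in-process MiniDFSCluster is illustrative:

// Sketch only: read the federation-related attributes off the platform MBean server.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class NameNodeMXBeanSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    try {
      cluster.waitActive();
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName name = new ObjectName("HadoopInfo:type=NameNodeInfo");
      // Federation exposes the cluster and block pool ids alongside Version.
      System.out.println("ClusterId   = " + mbs.getAttribute(name, "ClusterId"));
      System.out.println("BlockPoolId = " + mbs.getAttribute(name, "BlockPoolId"));
      System.out.println("Version     = " + mbs.getAttribute(name, "Version"));
    } finally {
      cluster.shutdown();
    }
  }
}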