hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1099687 [14/15] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/pr...
Date: Thu, 05 May 2011 05:40:13 GMT
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Thu May  5 05:40:07 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -46,8 +47,8 @@ import org.junit.Test;
  * This tests InterDataNodeProtocol for block handling. 
  */
 public class TestInterDatanodeProtocol {
-  public static void checkMetaInfo(Block b, DataNode dn) throws IOException {
-    Block metainfo = dn.data.getStoredBlock(b.getBlockId());
+  public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
+    Block metainfo = dn.data.getStoredBlock(b.getBlockPoolId(), b.getBlockId());
     Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
     Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
   }
@@ -97,10 +98,12 @@ public class TestInterDatanodeProtocol {
       assertTrue(datanode != null);
       
       //stop block scanner, so we could compare lastScanTime
-      datanode.blockScannerThread.interrupt();
+      if (datanode.blockScanner != null) {
+        datanode.blockScanner.shutdown();
+      }
 
       //verify BlockMetaDataInfo
-      Block b = locatedblock.getBlock();
+      ExtendedBlock b = locatedblock.getBlock();
       InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
       checkMetaInfo(b, datanode);
       long recoveryId = b.getGenerationStamp() + 1;
@@ -108,7 +111,7 @@ public class TestInterDatanodeProtocol {
           new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
 
       //verify updateBlock
-      Block newblock = new Block(
+      ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
@@ -129,44 +132,47 @@ public class TestInterDatanodeProtocol {
     Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
   }
 
-  /** Test {@link FSDataset#initReplicaRecovery(ReplicasMap, Block, long)} */
+  /** Test 
+   * {@link FSDataset#initReplicaRecovery(String, ReplicasMap, Block, long)}
+   */
   @Test
   public void testInitReplicaRecovery() throws IOException {
     final long firstblockid = 10000L;
     final long gs = 7777L;
     final long length = 22L;
-    final ReplicasMap map = new ReplicasMap();
+    final ReplicasMap map = new ReplicasMap(this);
+    String bpid = "BP-TEST";
     final Block[] blocks = new Block[5];
     for(int i = 0; i < blocks.length; i++) {
       blocks[i] = new Block(firstblockid + i, length, gs);
-      map.add(createReplicaInfo(blocks[i]));
+      map.add(bpid, createReplicaInfo(blocks[i]));
     }
     
     { 
       //normal case
       final Block b = blocks[0];
-      final ReplicaInfo originalInfo = map.get(b);
+      final ReplicaInfo originalInfo = map.get(bpid, b);
 
       final long recoveryid = gs + 1;
-      final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid);
+      final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(bpid, map, blocks[0], recoveryid);
       assertEquals(originalInfo, recoveryInfo);
 
-      final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(b);
+      final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
       Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
       Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
 
       //recover one more time 
       final long recoveryid2 = gs + 2;
-      final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid2);
+      final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(bpid, map, blocks[0], recoveryid2);
       assertEquals(originalInfo, recoveryInfo2);
 
-      final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(b);
+      final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
       Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
       Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
       
       //case RecoveryInProgressException
       try {
-        FSDataset.initReplicaRecovery(map, b, recoveryid);
+        FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
         Assert.fail();
       }
       catch(RecoveryInProgressException ripe) {
@@ -177,7 +183,7 @@ public class TestInterDatanodeProtocol {
     { // BlockRecoveryFI_01: replica not found
       final long recoveryid = gs + 1;
       final Block b = new Block(firstblockid - 1, length, gs);
-      ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(map, b, recoveryid);
+      ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
       Assert.assertNull("Data-node should not have this replica.", r);
     }
     
@@ -185,7 +191,7 @@ public class TestInterDatanodeProtocol {
       final long recoveryid = gs - 1;
       final Block b = new Block(firstblockid + 1, length, gs);
       try {
-        FSDataset.initReplicaRecovery(map, b, recoveryid);
+        FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
         Assert.fail();
       }
       catch(IOException ioe) {
@@ -198,7 +204,7 @@ public class TestInterDatanodeProtocol {
       final long recoveryid = gs + 1;
       final Block b = new Block(firstblockid, length, gs+1);
       try {
-        FSDataset.initReplicaRecovery(map, b, recoveryid);
+        FSDataset.initReplicaRecovery(bpid, map, b, recoveryid);
         fail("InitReplicaRecovery should fail because replica's " +
         		"gs is less than the block's gs");
       } catch (IOException e) {
@@ -208,7 +214,10 @@ public class TestInterDatanodeProtocol {
     }
   }
 
-  /** Test {@link FSDataset#updateReplicaUnderRecovery(ReplicaUnderRecovery, long, long)} */
+  /** 
+   * Test  for
+   * {@link FSDataset#updateReplicaUnderRecovery(ExtendedBlock, long, long)} 
+   * */
   @Test
   public void testUpdateReplicaUnderRecovery() throws IOException {
     final Configuration conf = new HdfsConfiguration();
@@ -217,6 +226,7 @@ public class TestInterDatanodeProtocol {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
+      String bpid = cluster.getNamesystem().getBlockPoolId();
 
       //create a file
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
@@ -237,14 +247,14 @@ public class TestInterDatanodeProtocol {
       final FSDataset fsdataset = (FSDataset)datanode.data;
 
       //initReplicaRecovery
-      final Block b = locatedblock.getBlock();
+      final ExtendedBlock b = locatedblock.getBlock();
       final long recoveryid = b.getGenerationStamp() + 1;
       final long newlength = b.getNumBytes() - 1;
       final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
           new RecoveringBlock(b, null, recoveryid));
 
       //check replica
-      final ReplicaInfo replica = fsdataset.fetchReplicaInfo(b.getBlockId());
+      final ReplicaInfo replica = fsdataset.fetchReplicaInfo(bpid, b.getBlockId());
       Assert.assertEquals(ReplicaState.RUR, replica.getState());
 
       //check meta data before update
@@ -254,8 +264,8 @@ public class TestInterDatanodeProtocol {
       //with (block length) != (stored replica's on disk length). 
       {
         //create a block with same id and gs but different length.
-        final Block tmp = new Block(rri.getBlockId(), rri.getNumBytes() - 1,
-            rri.getGenerationStamp());
+        final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
+            .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
         try {
           //update should fail
           fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
@@ -267,7 +277,7 @@ public class TestInterDatanodeProtocol {
 
       //update
       final ReplicaInfo finalized = fsdataset.updateReplicaUnderRecovery(
-          rri, recoveryid, newlength);
+          new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
 
       //check meta data after update
       FSDataset.checkReplicaFiles(finalized);
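
The recurring pattern in the hunks above is the federation change this branch tracks: Block is replaced by ExtendedBlock, and a block pool ID is threaded through every dataset lookup, so a block is identified by the pair (block pool, block ID) rather than by ID alone. A minimal sketch of that wrapping, with field names assumed rather than taken from the HDFS source:

    // Illustrative model of the Block -> ExtendedBlock change; the real
    // classes live in org.apache.hadoop.hdfs.protocol and differ in detail.
    public class ExtendedBlockSketch {
      static class Block {
        final long blockId, numBytes, generationStamp;
        Block(long id, long len, long gs) {
          blockId = id; numBytes = len; generationStamp = gs;
        }
      }

      static class ExtendedBlock extends Block {
        final String blockPoolId;  // which namespace owns this block
        ExtendedBlock(String bpid, long id, long len, long gs) {
          super(id, len, gs);
          blockPoolId = bpid;
        }
        // Strip the pool qualifier, as getLocalBlock() does in the patch.
        Block getLocalBlock() {
          return new Block(blockId, numBytes, generationStamp);
        }
      }

      public static void main(String[] args) {
        ExtendedBlock b = new ExtendedBlock("BP-TEST", 10000L, 22L, 7777L);
        System.out.println(b.blockPoolId + "/" + b.blockId);
      }
    }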

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java Thu May  5 05:40:07 2011
@@ -26,12 +26,13 @@ import org.junit.Test;
  * Unit test for ReplicasMap class
  */
 public class TestReplicasMap {
-  private static final ReplicasMap map = new ReplicasMap();
+  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private static final String bpid = "BP-TEST";
   private static final  Block block = new Block(1234, 1234, 1234);
   
   @BeforeClass
   public static void setup() {
-    map.add(new FinalizedReplica(block, null, null));
+    map.add(bpid, new FinalizedReplica(block, null, null));
   }
   
   /**
@@ -41,35 +42,35 @@ public class TestReplicasMap {
   public void testGet() {
     // Test 1: null argument throws invalid argument exception
     try {
-      map.get(null);
+      map.get(bpid, null);
       fail("Expected exception not thrown");
     } catch (IllegalArgumentException expected) { }
     
     // Test 2: successful lookup based on block
-    assertNotNull(map.get(block));
+    assertNotNull(map.get(bpid, block));
     
     // Test 3: Lookup failure - generation stamp mismatch 
     Block b = new Block(block);
     b.setGenerationStamp(0);
-    assertNull(map.get(b));
+    assertNull(map.get(bpid, b));
     
     // Test 4: Lookup failure - blockID mismatch
     b.setGenerationStamp(block.getGenerationStamp());
     b.setBlockId(0);
-    assertNull(map.get(b));
+    assertNull(map.get(bpid, b));
     
     // Test 5: successful lookup based on block ID
-    assertNotNull(map.get(block.getBlockId()));
+    assertNotNull(map.get(bpid, block.getBlockId()));
     
     // Test 6: failed lookup for invalid block ID
-    assertNull(map.get(0));
+    assertNull(map.get(bpid, 0));
   }
   
   @Test
   public void testAdd() {
     // Test 1: null argument throws invalid argument exception
     try {
-      map.add(null);
+      map.add(bpid, null);
       fail("Expected exception not thrown");
     } catch (IllegalArgumentException expected) { }
   }
@@ -78,28 +79,28 @@ public class TestReplicasMap {
   public void testRemove() {
     // Test 1: null argument throws invalid argument exception
     try {
-      map.remove(null);
+      map.remove(bpid, null);
       fail("Expected exception not thrown");
     } catch (IllegalArgumentException expected) { }
     
     // Test 2: remove failure - generation stamp mismatch 
     Block b = new Block(block);
     b.setGenerationStamp(0);
-    assertNull(map.remove(b));
+    assertNull(map.remove(bpid, b));
     
     // Test 3: remove failure - blockID mismatch
     b.setGenerationStamp(block.getGenerationStamp());
     b.setBlockId(0);
-    assertNull(map.remove(b));
+    assertNull(map.remove(bpid, b));
     
     // Test 4: remove success
-    assertNotNull(map.remove(block));
+    assertNotNull(map.remove(bpid, block));
     
     // Test 5: remove failure - invalid blockID
-    assertNull(map.remove(0));
+    assertNull(map.remove(bpid, 0));
     
     // Test 6: remove success
-    map.add(new FinalizedReplica(block, null, null));
-    assertNotNull(map.remove(block.getBlockId()));
+    map.add(bpid, new FinalizedReplica(block, null, null));
+    assertNotNull(map.remove(bpid, block.getBlockId()));
   }
 }
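
Every ReplicasMap operation in this test now takes the block pool ID as its first argument, which implies a map keyed first by pool and then by block. A minimal sketch of such a two-level map, assuming plain nested HashMaps (the patched ReplicasMap is more involved):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of a per-block-pool replica map: outer key is the block pool
    // ID, inner key is the block ID. Not the HDFS implementation.
    class PerPoolMap<V> {
      private final Map<String, Map<Long, V>> pools =
          new HashMap<String, Map<Long, V>>();

      V get(String bpid, long blockId) {
        Map<Long, V> m = pools.get(bpid);
        return m == null ? null : m.get(blockId);
      }

      V add(String bpid, long blockId, V replica) {
        if (replica == null) {
          throw new IllegalArgumentException("null replica");
        }
        Map<Long, V> m = pools.get(bpid);
        if (m == null) {
          m = new HashMap<Long, V>();
          pools.put(bpid, m);
        }
        return m.put(blockId, replica);
      }

      V remove(String bpid, long blockId) {
        Map<Long, V> m = pools.get(bpid);
        return m == null ? null : m.remove(blockId);
      }
    }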

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Thu May  5 05:40:07 2011
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import junit.framework.Assert;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -29,28 +32,28 @@ public class TestRoundRobinVolumesPolicy
   // Test the Round-Robin block-volume choosing algorithm.
   @Test
   public void testRR() throws Exception {
-    FSVolume[] volumes = new FSVolume[2];
+    final List<FSVolume> volumes = new ArrayList<FSVolume>();
 
     // First volume, with 100 bytes of space.
-    volumes[0] = Mockito.mock(FSVolume.class);
-    Mockito.when(volumes[0].getAvailable()).thenReturn(100L);
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
 
     // Second volume, with 200 bytes of space.
-    volumes[1] = Mockito.mock(FSVolume.class);
-    Mockito.when(volumes[1].getAvailable()).thenReturn(200L);
+    volumes.add(Mockito.mock(FSVolume.class));
+    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
 
     RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
         RoundRobinVolumesPolicy.class, null);
     
     // Test two rounds of round-robin choosing
-    Assert.assertEquals(volumes[0], policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes[1], policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes[0], policy.chooseVolume(volumes, 0));
-    Assert.assertEquals(volumes[1], policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
+    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
 
     // The first volume has only 100L space, so the policy should
     // wisely choose the second one in case we ask for more.
-    Assert.assertEquals(volumes[1], policy.chooseVolume(volumes, 150));
+    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150));
 
     // Fail if no volume can be chosen?
     try {
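
The assertions above pin down the policy's behavior: volumes are cycled in order, and a volume that cannot hold the requested size is skipped until the cycle wraps, at which point choosing fails. A sketch of that loop, with names assumed rather than copied from RoundRobinVolumesPolicy:

    import java.io.IOException;
    import java.util.List;

    // Illustrative round-robin volume chooser matching the behavior the
    // test exercises; not the HDFS source.
    class RoundRobinChooser {
      interface Volume { long getAvailable() throws IOException; }

      private int curVolume = 0;

      synchronized <T extends Volume> T chooseVolume(List<T> volumes,
          long blockSize) throws IOException {
        if (volumes.isEmpty()) {
          throw new IOException("No volumes available");
        }
        int startVolume = curVolume;
        while (true) {
          T volume = volumes.get(curVolume);
          curVolume = (curVolume + 1) % volumes.size();
          if (volume.getAvailable() > blockSize) {
            return volume;  // enough free space: take this one
          }
          if (curVolume == startVolume) {
            // cycled through every volume without finding room
            throw new IOException("Out of space for block of size " + blockSize);
          }
        }
      }
    }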

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Thu May  5 05:40:07 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
@@ -34,15 +35,10 @@ import org.apache.hadoop.util.DataChecks
 
 /**
  * this class tests the methods of the  SimulatedFSDataset.
- *
  */
-
 public class TestSimulatedFSDataset extends TestCase {
-  
   Configuration conf = null;
-  
-
-  
+  static final String bpid = "BP-TEST";
   static final int NUMBLOCKS = 20;
   static final int BLOCK_LENGTH_MULTIPLIER = 79;
 
@@ -50,7 +46,6 @@ public class TestSimulatedFSDataset exte
     super.setUp();
       conf = new HdfsConfiguration();
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
- 
   }
 
   protected void tearDown() throws Exception {
@@ -61,10 +56,13 @@ public class TestSimulatedFSDataset exte
     return blkid*BLOCK_LENGTH_MULTIPLIER;
   }
   
-  int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
+  int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId)
+      throws IOException {
     int bytesAdded = 0;
     for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
-      Block b = new Block(i, 0, 0); // we pass expected len as zero, - fsdataset should use the sizeof actual data written
+      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
+      // we pass expected len as zero, - fsdataset should use the sizeof actual
+      // data written
       ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
       BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
       try {
@@ -89,8 +87,8 @@ public class TestSimulatedFSDataset exte
   }
 
   public void testGetMetaData() throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
-    Block b = new Block(1, 5, 0);
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
+    ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
     try {
       assertFalse(fsdataset.metaFileExists(b));
       assertTrue("Expected an IO exception", false);
@@ -98,7 +96,7 @@ public class TestSimulatedFSDataset exte
       // ok - as expected
     }
     addSomeBlocks(fsdataset); // Only need to add one but ....
-    b = new Block(1, 0, 0);
+    b = new ExtendedBlock(bpid, 1, 0, 0);
     InputStream metaInput = fsdataset.getMetaDataInputStream(b);
     DataInputStream metaDataInput = new DataInputStream(metaInput);
     short version = metaDataInput.readShort();
@@ -110,19 +108,18 @@ public class TestSimulatedFSDataset exte
 
 
   public void testStorageUsage() throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
     assertEquals(fsdataset.getDfsUsed(), 0);
     assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
     int bytesAdded = addSomeBlocks(fsdataset);
     assertEquals(bytesAdded, fsdataset.getDfsUsed());
     assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());
-    
   }
 
 
 
-  void  checkBlockDataAndSize(FSDatasetInterface fsdataset, 
-              Block b, long expectedLen) throws IOException { 
+  void checkBlockDataAndSize(FSDatasetInterface fsdataset, ExtendedBlock b,
+      long expectedLen) throws IOException { 
     InputStream input = fsdataset.getBlockInputStream(b);
     long lengthRead = 0;
     int data;
@@ -134,36 +131,35 @@ public class TestSimulatedFSDataset exte
   }
   
   public void testWriteRead() throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
     addSomeBlocks(fsdataset);
     for (int i=1; i <= NUMBLOCKS; ++i) {
-      Block b = new Block(i, 0, 0);
+      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
       assertTrue(fsdataset.isValidBlock(b));
       assertEquals(blockIdToLen(i), fsdataset.getLength(b));
       checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
     }
   }
 
-
-
   public void testGetBlockReport() throws IOException {
-    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); 
-    BlockListAsLongs blockReport = fsdataset.getBlockReport();
+    SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
+    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(0, blockReport.getNumberOfBlocks());
-    int bytesAdded = addSomeBlocks(fsdataset);
-    blockReport = fsdataset.getBlockReport();
+    addSomeBlocks(fsdataset);
+    blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
     for (Block b: blockReport) {
       assertNotNull(b);
       assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
     }
   }
+  
   public void testInjectionEmpty() throws IOException {
-    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); 
-    BlockListAsLongs blockReport = fsdataset.getBlockReport();
+    SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
+    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(0, blockReport.getNumberOfBlocks());
     int bytesAdded = addSomeBlocks(fsdataset);
-    blockReport = fsdataset.getBlockReport();
+    blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
     for (Block b: blockReport) {
       assertNotNull(b);
@@ -172,28 +168,26 @@ public class TestSimulatedFSDataset exte
     
     // Inject blocks into an empty fsdataset
     //  - injecting the blocks we got above.
-  
-   
-    SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
-    sfsdataset.injectBlocks(blockReport);
-    blockReport = sfsdataset.getBlockReport();
+    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
+    sfsdataset.injectBlocks(bpid, blockReport);
+    blockReport = sfsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
     for (Block b: blockReport) {
       assertNotNull(b);
       assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
-      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b));
+      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset
+          .getLength(new ExtendedBlock(bpid, b)));
     }
     assertEquals(bytesAdded, sfsdataset.getDfsUsed());
     assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
   }
 
   public void testInjectionNonEmpty() throws IOException {
-    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); 
-    
-    BlockListAsLongs blockReport = fsdataset.getBlockReport();
+    SimulatedFSDataset fsdataset = getSimulatedFSDataset(); 
+    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(0, blockReport.getNumberOfBlocks());
     int bytesAdded = addSomeBlocks(fsdataset);
-    blockReport = fsdataset.getBlockReport();
+    blockReport = fsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
     for (Block b: blockReport) {
       assertNotNull(b);
@@ -203,44 +197,41 @@ public class TestSimulatedFSDataset exte
     
     // Inject blocks into an non-empty fsdataset
     //  - injecting the blocks we got above.
-  
-   
-    SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
+    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
     // Add some blocks whose block ids do not conflict with
     // the ones we are going to inject.
     bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
-    BlockListAsLongs blockReport2 = sfsdataset.getBlockReport();
+    sfsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
-    blockReport2 = sfsdataset.getBlockReport();
+    sfsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
-    sfsdataset.injectBlocks(blockReport);
-    blockReport = sfsdataset.getBlockReport();
+    sfsdataset.injectBlocks(bpid, blockReport);
+    blockReport = sfsdataset.getBlockReport(bpid);
     assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks());
     for (Block b: blockReport) {
       assertNotNull(b);
       assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
-      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b));
+      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset
+          .getLength(new ExtendedBlock(bpid, b)));
     }
     assertEquals(bytesAdded, sfsdataset.getDfsUsed());
     assertEquals(sfsdataset.getCapacity()-bytesAdded,  sfsdataset.getRemaining());
     
-    
     // Now test that the dataset cannot be created if it does not have sufficient cap
-
     conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
  
     try {
-      sfsdataset = new SimulatedFSDataset(conf);
-      sfsdataset.injectBlocks(blockReport);
+      sfsdataset = getSimulatedFSDataset();
+      sfsdataset.addBlockPool(bpid, conf);
+      sfsdataset.injectBlocks(bpid, blockReport);
       assertTrue("Expected an IO exception", false);
     } catch (IOException e) {
       // ok - as expected
     }
-
   }
 
-  public void checkInvalidBlock(Block b) throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+  public void checkInvalidBlock(ExtendedBlock b) throws IOException {
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
     assertFalse(fsdataset.isValidBlock(b));
     try {
       fsdataset.getLength(b);
@@ -262,41 +253,42 @@ public class TestSimulatedFSDataset exte
     } catch (IOException e) {
       // ok - as expected
     }
-    
   }
   
   public void testInValidBlocks() throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
-    Block b = new Block(1, 5, 0);
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
+    ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
     checkInvalidBlock(b);
     
    // Now check invalid after adding some blocks
     addSomeBlocks(fsdataset);
-    b = new Block(NUMBLOCKS + 99, 5, 0);
+    b = new ExtendedBlock(bpid, NUMBLOCKS + 99, 5, 0);
     checkInvalidBlock(b);
-    
   }
 
   public void testInvalidate() throws IOException {
-    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    FSDatasetInterface fsdataset = getSimulatedFSDataset(); 
     int bytesAdded = addSomeBlocks(fsdataset);
     Block[] deleteBlocks = new Block[2];
     deleteBlocks[0] = new Block(1, 0, 0);
     deleteBlocks[1] = new Block(2, 0, 0);
-    fsdataset.invalidate(deleteBlocks);
-    checkInvalidBlock(deleteBlocks[0]);
-    checkInvalidBlock(deleteBlocks[1]);
+    fsdataset.invalidate(bpid, deleteBlocks);
+    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
+    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
     long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
     assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
     assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted,  fsdataset.getRemaining());
     
-    
-    
     // Now make sure the rest of the blocks are valid
     for (int i=3; i <= NUMBLOCKS; ++i) {
       Block b = new Block(i, 0, 0);
-      assertTrue(fsdataset.isValidBlock(b));
+      assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
     }
   }
-
+  
+  private SimulatedFSDataset getSimulatedFSDataset() throws IOException {
+    SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf); 
+    fsdataset.addBlockPool(bpid, conf);
+    return fsdataset;
+  }
 }
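
The new getSimulatedFSDataset() helper encodes the setup order every rewritten test relies on: a block pool must be registered with the dataset (addBlockPool) before any block in that pool can be written or reported. A toy model of that invariant, with a hypothetical interface standing in for SimulatedFSDataset:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    // Illustrative stand-in, not the SimulatedFSDataset API: operations on
    // an unregistered block pool fail fast.
    class PooledDataset {
      private final Set<String> pools = new HashSet<String>();

      void addBlockPool(String bpid) {
        pools.add(bpid);
      }

      void createRbw(String bpid, long blockId) throws IOException {
        if (!pools.contains(bpid)) {
          throw new IOException("Unknown block pool: " + bpid);
        }
        // ... allocate the replica-being-written here ...
      }
    }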

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java Thu May  5 05:40:07 2011
@@ -29,11 +29,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
@@ -49,14 +50,14 @@ public class TestTransferRbw {
   private static final Random RAN = new Random();
   private static final short REPLICATION = (short)1;
 
-  private static ReplicaBeingWritten getRbw(final DataNode datanode
-      ) throws InterruptedException {
-    return (ReplicaBeingWritten)getReplica(datanode, ReplicaState.RBW);
+  private static ReplicaBeingWritten getRbw(final DataNode datanode,
+      String bpid) throws InterruptedException {
+    return (ReplicaBeingWritten)getReplica(datanode, bpid, ReplicaState.RBW);
   }
   private static ReplicaInPipeline getReplica(final DataNode datanode,
-      final ReplicaState expectedState) throws InterruptedException {
+      final String bpid, final ReplicaState expectedState) throws InterruptedException {
     final FSDataset dataset = ((FSDataset)datanode.data);
-    final Collection<ReplicaInfo> replicas = dataset.volumeMap.replicas();
+    final Collection<ReplicaInfo> replicas = dataset.volumeMap.replicas(bpid);
     for(int i = 0; i < 5 && replicas.size() == 0; i++) {
       LOG.info("wait since replicas.size() == 0; i=" + i);
       Thread.sleep(1000);
@@ -94,9 +95,10 @@ public class TestTransferRbw {
       final ReplicaBeingWritten oldrbw;
       final DataNode newnode;
       final DatanodeInfo newnodeinfo;
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
       {
         final DataNode oldnode = cluster.getDataNodes().get(0);
-        oldrbw = getRbw(oldnode);
+        oldrbw = getRbw(oldnode, bpid);
         LOG.info("oldrbw = " + oldrbw);
         
         //add a datanode
@@ -109,15 +111,15 @@ public class TestTransferRbw {
               ).getDatanodeReport(DatanodeReportType.LIVE);
           Assert.assertEquals(2, datatnodeinfos.length);
           int i = 0;
-          for(; i < datatnodeinfos.length
-                && !datatnodeinfos[i].equals(newnode.dnRegistration); i++);
+          for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
+              i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
           Assert.assertTrue(i < datatnodeinfos.length);
           newnodeinfo = datatnodeinfos[i];
           oldnodeinfo = datatnodeinfos[1 - i];
         }
         
         //transfer RBW
-        final Block b = new Block(oldrbw.getBlockId(), oldrbw.getBytesAcked(),
+        final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
             oldrbw.getGenerationStamp());
         final DataTransferProtocol.Status s = DFSTestUtil.transferRbw(
             b, fs.getClient(), oldnodeinfo, newnodeinfo);
@@ -125,7 +127,7 @@ public class TestTransferRbw {
       }
 
       //check new rbw
-      final ReplicaBeingWritten newrbw = getRbw(newnode);
+      final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
       LOG.info("newrbw = " + newrbw);
       Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
       Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
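
The getDNRegistrationForBP(bpid) call introduced above reflects that, under federation, a datanode registers separately with each namenode and therefore keeps one registration per block pool. A toy model of that lookup, with names assumed:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: one registration per block pool, keyed by pool ID.
    class DatanodeRegistrations {
      static class Registration {
        final String bpid;
        Registration(String bpid) { this.bpid = bpid; }
      }

      private final Map<String, Registration> byPool =
          new HashMap<String, Registration>();

      void register(String bpid) {
        byPool.put(bpid, new Registration(bpid));
      }

      Registration getDNRegistrationForBP(String bpid) {
        return byPool.get(bpid);
      }
    }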

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Thu May  5 05:40:07 2011
@@ -19,23 +19,17 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-
 import org.junit.Assert;
 import org.junit.Test;
 
 /** Test if FSDataset#append, writeToRbw, and writeToTmp */
 public class TestWriteToReplica {
-  final private static Block[] blocks = new Block[] {
-    new Block(1, 1, 2001), new Block(2, 1, 2002), 
-    new Block(3, 1, 2003), new Block(4, 1, 2004),
-    new Block(5, 1, 2005), new Block(6, 1, 2006)
-  };
+
   final private static int FINALIZED = 0;
   final private static int TEMPORARY = 1;
   final private static int RBW = 2;
@@ -47,16 +41,19 @@ public class TestWriteToReplica {
   @Test
   public void testClose() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      setup(dataSet);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test close
-      testClose(dataSet);
+      testClose(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -72,10 +69,11 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      setup(dataSet);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test append
-      testAppend(dataSet);
+      testAppend(bpid, dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -91,10 +89,11 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      setup(dataSet);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test writeToRbw
-      testWriteToRbw(dataSet);
+      testWriteToRbw(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
@@ -110,49 +109,70 @@ public class TestWriteToReplica {
       FSDataset dataSet = (FSDataset)dn.data;
 
       // set up replicasMap
-      setup(dataSet);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      ExtendedBlock[] blocks = setup(bpid, dataSet);
 
       // test writeToTemporary
-      testWriteToTemporary(dataSet);
+      testWriteToTemporary(dataSet, blocks);
     } finally {
       cluster.shutdown();
     }
   }
   
-  private void setup(FSDataset dataSet) throws IOException {
+  /**
+   * Generate testing environment and return a collection of blocks
+   * on which to run the tests.
+   * 
+   * @param bpid Block pool ID to generate blocks for
+   * @param dataSet Namespace in which to insert blocks
+   * @return Contrived blocks for further testing.
+   * @throws IOException
+   */
+  private ExtendedBlock[] setup(String bpid, FSDataset dataSet) throws IOException {
     // setup replicas map
+    
+    ExtendedBlock[] blocks = new ExtendedBlock[] {
+        new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002), 
+        new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
+        new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
+    };
+    
     ReplicasMap replicasMap = dataSet.volumeMap;
     FSVolume vol = dataSet.volumes.getNextVolume(0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
-        blocks[FINALIZED], vol, vol.getDir());
-    replicasMap.add(replicaInfo);
+        blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
+    replicasMap.add(bpid, replicaInfo);
     replicaInfo.getBlockFile().createNewFile();
     replicaInfo.getMetaFile().createNewFile();
     
-    replicasMap.add(new ReplicaInPipeline(
+    replicasMap.add(bpid, new ReplicaInPipeline(
         blocks[TEMPORARY].getBlockId(),
         blocks[TEMPORARY].getGenerationStamp(), vol, 
-        vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
+        vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
     
-    replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol, 
-        vol.createRbwFile(blocks[RBW]).getParentFile(), null);
-    replicasMap.add(replicaInfo);
+    replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol, 
+        vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);
+    replicasMap.add(bpid, replicaInfo);
     replicaInfo.getBlockFile().createNewFile();
     replicaInfo.getMetaFile().createNewFile();
     
-    replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol, 
-        vol.createRbwFile(blocks[RWR]).getParentFile()));
-    replicasMap.add(new ReplicaUnderRecovery(
-        new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007));    
+    replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
+        blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
+            blocks[RWR].getLocalBlock()).getParentFile()));
+    replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
+        .getLocalBlock(), vol, vol.getDir()), 2007));    
+    
+    return blocks;
   }
   
-  private void testAppend(FSDataset dataSet) throws IOException {
+  private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
-    FSVolume v = dataSet.volumeMap.get(blocks[FINALIZED]).getVolume();
+    FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
+        .getVolume();
     long available = v.getCapacity()-v.getDfsUsed();
     long expectedLen = blocks[FINALIZED].getNumBytes();
     try {
-      v.decDfsUsed(-available);
+      v.decDfsUsed(bpid, -available);
       blocks[FINALIZED].setNumBytes(expectedLen+100);
       dataSet.append(blocks[FINALIZED], newGS, expectedLen);
       Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
@@ -160,7 +180,7 @@ public class TestWriteToReplica {
       Assert.assertTrue(e.getMessage().startsWith(
           "Insufficient space for appending to "));
     }
-    v.decDfsUsed(available);
+    v.decDfsUsed(bpid, available);
     blocks[FINALIZED].setNumBytes(expectedLen);
 
     newGS = blocks[RBW].getGenerationStamp()+1;
@@ -265,7 +285,7 @@ public class TestWriteToReplica {
     }
   }
 
-  private void testClose(FSDataset dataSet) throws IOException {
+  private void testClose(FSDataset dataSet, ExtendedBlock [] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
     dataSet.recoverClose(blocks[FINALIZED], newGS, 
         blocks[FINALIZED].getNumBytes());  // successful
@@ -315,7 +335,7 @@ public class TestWriteToReplica {
     }
   }
   
-  private void testWriteToRbw(FSDataset dataSet) throws IOException {
+  private void testWriteToRbw(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
     try {
       dataSet.recoverRbw(blocks[FINALIZED],
           blocks[FINALIZED].getGenerationStamp()+1,
@@ -408,7 +428,7 @@ public class TestWriteToReplica {
     dataSet.createRbw(blocks[NON_EXISTENT]);
   }
   
-  private void testWriteToTemporary(FSDataset dataSet) throws IOException {
+  private void testWriteToTemporary(FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
     try {
       dataSet.createTemporary(blocks[FINALIZED]);
       Assert.fail("Should not have created a temporary replica that was " +
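
The reworked setup() derives its fixture blocks from the cluster's runtime block pool ID instead of a static array, so the array can no longer be a class constant. The construction it performs, factored into a helper for illustration (the ExtendedBlock(bpid, id, len, genStamp) constructor is the one used in the patch; the helper itself is assumed):

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class TestBlockFixture {
      // Build the six fixture blocks for whichever pool the cluster assigns.
      static ExtendedBlock[] makeTestBlocks(String bpid) {
        long[][] specs = { {1, 1, 2001}, {2, 1, 2002}, {3, 1, 2003},
                           {4, 1, 2004}, {5, 1, 2005}, {6, 1, 2006} };
        ExtendedBlock[] blocks = new ExtendedBlock[specs.length];
        for (int i = 0; i < specs.length; i++) {
          blocks[i] = new ExtendedBlock(bpid, specs[i][0], specs[i][1],
              specs[i][2]);
        }
        return blocks;
      }
    }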

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Thu May  5 05:40:07 2011
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -74,7 +75,7 @@ import org.apache.log4j.LogManager;
  * <li>-logLevel L specifies the logging level when the benchmark runs.
  * The default logging level is {@link Level#ERROR}.</li>
  * <li>-UGCacheRefreshCount G will cause the benchmark to call
- * {@link NameNode#refreshUserToGroupsMappings(Configuration)} after
+ * {@link NameNode#refreshUserToGroupsMappings()} after
  * every G operations, which purges the name-node's user group cache.
  * By default the refresh is never called.</li>
  * <li>-keepResults do not clean up the name-space after execution.</li>
@@ -798,8 +799,9 @@ public class NNThroughputBenchmark {
      */
     void sendHeartbeat() throws IOException {
       // register datanode
-      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
-          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      // TODO:FEDERATION currently a single block pool is supported
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -842,8 +844,9 @@ public class NNThroughputBenchmark {
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
-          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      // TODO:FEDERATION currently a single block pool is supported
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
@@ -874,6 +877,7 @@ public class NNThroughputBenchmark {
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
           nameNode.blockReceived( receivedDNReg, 
+                                  nameNode.getNamesystem().getBlockPoolId(),
                                   new Block[] {blocks[i]},
                                   new String[] {DataNode.EMPTY_DEL_HINT});
         }
@@ -969,7 +973,7 @@ public class NNThroughputBenchmark {
         nameNode.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
             BLOCK_SIZE);
-        Block lastBlock = addBlocks(fileName, clientName);
+        ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         nameNode.complete(fileName, clientName, lastBlock);
       }
       // prepare block reports
@@ -978,18 +982,19 @@ public class NNThroughputBenchmark {
       }
     }
 
-    private Block addBlocks(String fileName, String clientName)
+    private ExtendedBlock addBlocks(String fileName, String clientName)
     throws IOException {
-      Block prevBlock = null;
+      ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
         LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
-          datanodes[dnIdx].addBlock(loc.getBlock());
+          datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           nameNode.blockReceived(
               datanodes[dnIdx].dnRegistration, 
-              new Block[] {loc.getBlock()},
+              loc.getBlock().getBlockPoolId(),
+              new Block[] {loc.getBlock().getLocalBlock()},
               new String[] {""});
         }
       }
@@ -1007,7 +1012,8 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
-      nameNode.blockReport(dn.dnRegistration, dn.getBlockReportList());
+      nameNode.blockReport(dn.dnRegistration, nameNode.getNamesystem()
+          .getBlockPoolId(), dn.getBlockReportList());
       long end = System.currentTimeMillis();
       return end-start;
     }
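
blockReport() is timed with wall-clock millisecond stamps around the RPC, as in the hunk above. The same micro-timing pattern in isolation:

    final class Timing {
      // Generic form of the timing the benchmark inlines around
      // nameNode.blockReport(...): elapsed milliseconds for one call.
      static long timeMillis(Runnable op) {
        long start = System.currentTimeMillis();
        op.run();
        return System.currentTimeMillis() - start;
      }
    }

System.currentTimeMillis() can jump if the system clock is adjusted mid-run; System.nanoTime() is the monotonic alternative when only elapsed time matters.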

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Thu May  5 05:40:07 2011
@@ -140,7 +140,7 @@ public class OfflineEditsViewerHelper {
     // no check, if it's not it throws an exception which is what we want
     DistributedFileSystem dfs =
       (DistributedFileSystem)cluster.getFileSystem();
-    FileContext fc = FileContext.getFileContext(cluster.getURI(), config);
+    FileContext fc = FileContext.getFileContext(cluster.getURI(0), config);
     // OP_ADD 0, OP_SET_GENSTAMP 10
     Path pathFileCreate = new Path("/file_create");
     FSDataOutputStream s = dfs.create(pathFileCreate);

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Thu May  5 05:40:07 2011
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestHDFSServerPorts;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,8 +37,6 @@ import junit.framework.TestCase;
 public class TestBackupNode extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 
-  // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
-  static final String THIS_HOST = TestHDFSServerPorts.getFullHostName();
   static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
 
   protected void setUp() throws Exception {
@@ -138,10 +135,6 @@ public class TestBackupNode extends Test
       //
       // Take a checkpoint
       //
-      conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-          THIS_HOST + ":0");
-      conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-          THIS_HOST + ":0");
       backup = startBackupNode(conf, op, 1);
       waitCheckpointDone(backup);
     } catch(IOException e) {
@@ -232,17 +225,13 @@ public class TestBackupNode extends Test
     try {
       // start name-node and backup node 1
       cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build();
-      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-                THIS_HOST + ":7771");
-      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-                THIS_HOST + ":7775");
+      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
+      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
       backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
       // try to start backup node 2
       conf2 = new HdfsConfiguration(conf1);
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-                THIS_HOST + ":7772");
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-                THIS_HOST + ":7776");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7772");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7776");
       try {
         backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
         backup2.stop();

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Thu May  5 05:40:07 2011
@@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.*;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
@@ -122,7 +122,7 @@ public class TestBlockTokenWithDFS exten
     InetSocketAddress targetAddr = null;
     Socket s = null;
     BlockReader blockReader = null;
-    Block block = lblock.getBlock();
+    ExtendedBlock block = lblock.getBlock();
     try {
       DatanodeInfo[] nodes = lblock.getLocations();
       targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
@@ -130,7 +130,8 @@ public class TestBlockTokenWithDFS exten
       s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
       s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-      String file = BlockReader.getFileName(targetAddr, block.getBlockId());
+      String file = BlockReader.getFileName(targetAddr, 
+          "test-blockpoolid", block.getBlockId());
       blockReader = BlockReader.newBlockReader(s, file, block, 
           lblock.getBlockToken(), 0, -1, 
           conf.getInt("io.file.buffer.size", 4096));
@@ -351,7 +352,8 @@ public class TestBlockTokenWithDFS exten
       // read should succeed
       tryRead(conf, lblock, true);
       // use a token with wrong blockID
-      Block wrongBlock = new Block(lblock.getBlock().getBlockId() + 1);
+      ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
+          .getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
       lblock.setBlockToken(cluster.getNameNode().getNamesystem()
           .blockTokenSecretManager.generateToken(wrongBlock,
               EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
@@ -413,7 +415,7 @@ public class TestBlockTokenWithDFS exten
       assertTrue(cluster.restartDataNodes(true));
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
-      cluster.shutdownNameNode();
+      cluster.shutdownNameNode(0);
 
       // confirm tokens cached in in1 are still valid
       lblocks = DFSTestUtil.getAllBlocks(in1);
@@ -449,8 +451,8 @@ public class TestBlockTokenWithDFS exten
        */
 
       // restart the namenode and then shut it down for test
-      cluster.restartNameNode();
-      cluster.shutdownNameNode();
+      cluster.restartNameNode(0);
+      cluster.shutdownNameNode(0);
 
       // verify blockSeekTo() still works (forced to use cached tokens)
       in1.seek(0);
@@ -469,13 +471,13 @@ public class TestBlockTokenWithDFS exten
        */
 
       // restore the cluster and restart the datanodes for test
-      cluster.restartNameNode();
+      cluster.restartNameNode(0);
       assertTrue(cluster.restartDataNodes(true));
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
 
       // shutdown namenode so that DFSClient can't get new tokens from namenode
-      cluster.shutdownNameNode();
+      cluster.shutdownNameNode(0);
 
       // verify blockSeekTo() fails (cached tokens become invalid)
       in1.seek(0);
@@ -484,7 +486,7 @@ public class TestBlockTokenWithDFS exten
       assertFalse(checkFile2(in3));
 
       // restart the namenode to allow DFSClient to re-fetch tokens
-      cluster.restartNameNode();
+      cluster.restartNameNode(0);
       // verify blockSeekTo() works again (by transparently re-fetching
       // tokens from namenode)
       in1.seek(0);
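
The negative test above forges a token for a block with blockId + 1 in the same pool and expects the read to be refused, since a block token is bound to the specific block it was issued for. A toy model of that id-bound check (not the BlockTokenSecretManager API, and not cryptographic):

    // Illustrative only: a real block token is an HMAC over the token
    // fields, not a string comparison.
    class BlockTokenSketch {
      static String generateToken(String bpid, long blockId, String secret) {
        return bpid + ":" + blockId + ":" + secret;
      }

      static boolean checkAccess(String token, String bpid, long blockId,
          String secret) {
        return token.equals(generateToken(bpid, blockId, secret));
      }

      public static void main(String[] args) {  // run with java -ea
        String t = generateToken("BP-TEST", 42L, "k");
        assert checkAccess(t, "BP-TEST", 42L, "k");
        assert !checkAccess(t, "BP-TEST", 43L, "k");  // wrong block id rejected
      }
    }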

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Thu May  5 05:40:07 2011
@@ -163,7 +163,7 @@ public class TestBlockUnderConstruction 
       final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
       final List<LocatedBlock> blocks = lb.getLocatedBlocks();
       assertEquals(i, blocks.size());
-      final Block b = blocks.get(blocks.size() - 1).getBlock();
+      final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
       assertTrue(b instanceof BlockInfoUnderConstruction);
 
       if (++i < NUM_BLOCKS) {
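
[Reviewer note] The one-line change above is the common unwrapping pattern in this branch: client-facing APIs now hand back pool-scoped ExtendedBlocks, and namenode-side tests strip the pool id before comparing against internal block metadata. A minimal sketch (the helper name is illustrative):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    class LocalBlockSketch {
      // getLocalBlock() drops the block pool id, leaving the plain
      // blockId/numBytes/generationStamp triple for identity checks.
      static Block toLocal(LocatedBlock lb) {
        return lb.getBlock().getLocalBlock();
      }
    }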

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Thu May  5 05:40:07 2011
@@ -28,7 +28,8 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -92,7 +93,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block with a replication factor of 3
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
 
       // Add a new datanode on a different rack
@@ -126,7 +127,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block with a replication factor of 1
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
 
       REPLICATION_FACTOR = 2;
@@ -160,7 +161,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
       
       // Add new datanodes on a different rack and increase the
@@ -200,12 +201,12 @@ public class TestBlocksWithNotEnoughRack
       DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
       final String fileContent = DFSTestUtil.readFile(fs, filePath);
 
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Corrupt a replica of the block
       int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
-      assertTrue(cluster.corruptReplica(b.getBlockName(), dnToCorrupt));
+      assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt, b));
 
       // Restart the datanode so blocks are re-scanned, and the corrupt
       // block is detected.
@@ -220,7 +221,7 @@ public class TestBlocksWithNotEnoughRack
       // Ensure all replicas are valid (the corrupt replica may not
       // have been cleaned up yet).
       for (int i = 0; i < racks.length; i++) {
-        String blockContent = cluster.readBlockOnDataNode(i, b.getBlockName());
+        String blockContent = cluster.readBlockOnDataNode(i, b);
         if (blockContent != null && i != dnToCorrupt) {
           assertEquals("Corrupt replica", fileContent, blockContent);
         }
@@ -248,7 +249,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Decrease the replication factor, make sure the deleted replica
@@ -282,7 +283,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block with a replication factor of 2
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Make the last datanode look like it failed to heartbeat by 
@@ -290,8 +291,9 @@ public class TestBlocksWithNotEnoughRack
       ArrayList<DataNode> datanodes = cluster.getDataNodes();
       int idx = datanodes.size() - 1;
       DataNode dataNode = datanodes.get(idx);
+      DatanodeID dnId = dataNode.getDatanodeId();
       cluster.stopDataNode(idx);
-      ns.removeDatanode(dataNode.dnRegistration);
+      ns.removeDatanode(dnId);
 
       // The block should still have sufficient # replicas, across racks.
       // The last node may not have contained a replica, but if it did
@@ -303,8 +305,9 @@ public class TestBlocksWithNotEnoughRack
       datanodes = cluster.getDataNodes();
       idx = datanodes.size() - 1;
       dataNode = datanodes.get(idx);
+      dnId = dataNode.getDatanodeId();
       cluster.stopDataNode(idx);
-      ns.removeDatanode(dataNode.dnRegistration);
+      ns.removeDatanode(dnId);
 
       // Make sure we have enough live replicas even though we are
       // short one rack and therefore need one replica
@@ -334,7 +337,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Make the last (cross rack) datanode look like it failed
@@ -342,8 +345,9 @@ public class TestBlocksWithNotEnoughRack
       ArrayList<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(3, datanodes.size());
       DataNode dataNode = datanodes.get(2);
+      DatanodeID dnId = dataNode.getDatanodeId();
       cluster.stopDataNode(2);
-      ns.removeDatanode(dataNode.dnRegistration);
+      ns.removeDatanode(dnId);
 
       // The block gets re-replicated to another datanode so it has a 
       // sufficient # replicas, but not across racks, so there should
@@ -394,7 +398,7 @@ public class TestBlocksWithNotEnoughRack
       // Create a file with one block
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Decommission one of the hosts with the block, this should cause 
@@ -443,7 +447,7 @@ public class TestBlocksWithNotEnoughRack
     try {
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
-      Block b = DFSTestUtil.getFirstBlock(fs, filePath);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
 
       // Lower the replication factor so the blocks are over replicated
@@ -472,4 +476,4 @@ public class TestBlocksWithNotEnoughRack
       cluster.shutdown();
     }
   }
-}
\ No newline at end of file
+}
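
[Reviewer note] The recurring pattern in this file: replica-manipulation helpers now take a datanode index plus a pool-scoped ExtendedBlock instead of a bare block file name, since replica paths are laid out per block pool. A sketch of the corrupt-then-verify setup, using only calls that appear in the diff (the wrapper class is illustrative):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class CorruptReplicaSketch {
      static boolean corruptFirstBlock(MiniDFSCluster cluster, FileSystem fs,
          Path path) throws Exception {
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, path);
        int dn = DFSTestUtil.firstDnWithBlock(cluster, b);
        // static in this branch, keyed by datanode index and block
        return MiniDFSCluster.corruptReplica(dn, b);
      }
    }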

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Thu May  5 05:40:07 2011
@@ -95,7 +95,7 @@ public class TestCheckPointForSecurityTo
       String[] args = new String[]{"-saveNamespace"};
 
       // verify that the edits file is NOT empty
-      Collection<URI> editsDirs = cluster.getNameEditsDirs();
+      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
       for(URI uri : editsDirs) {
         File ed = new File(uri.getPath());
         Assert.assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
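
[Reviewer note] With federation, MiniDFSCluster accessors that used to be singular now take a namenode index; index 0 selects the only namenode in non-federated tests. A sketch of the edits-dir check above (the assertion wording is illustrative):

    import java.io.File;
    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class EditsDirSketch {
      static void assertEditsNonEmpty(MiniDFSCluster cluster) {
        Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
        for (URI uri : editsDirs) {
          File ed = new File(uri.getPath());
          long len = new File(ed, "current/edits").length();
          if (len <= Integer.SIZE / Byte.SIZE) {
            throw new AssertionError("edits file unexpectedly empty: " + ed);
          }
        }
      }
    }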

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Thu May  5 05:40:07 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import junit.framework.TestCase;
 import java.io.*;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.Collection;
 import java.util.List;
@@ -26,6 +27,7 @@ import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -656,7 +658,7 @@ public class TestCheckpoint extends Test
       //
       assertTrue(!fileSys.exists(file1));
       assertTrue(!fileSys.exists(file2));
-      namedirs = cluster.getNameDirs();
+      namedirs = cluster.getNameDirs(0);
 
       //
       // Create file1
@@ -743,7 +745,7 @@ public class TestCheckpoint extends Test
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
       fs = (DistributedFileSystem)(cluster.getFileSystem());
-      fc = FileContext.getFileContext(cluster.getURI());
+      fc = FileContext.getFileContext(cluster.getURI(0));
 
       // Saving image without safe mode should fail
       DFSAdmin admin = new DFSAdmin(conf);
@@ -766,7 +768,7 @@ public class TestCheckpoint extends Test
       assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
 
       // verify that the edits file is NOT empty
-      Collection<URI> editsDirs = cluster.getNameEditsDirs();
+      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
       for(URI uri : editsDirs) {
         File ed = new File(uri.getPath());
         assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
@@ -793,7 +795,7 @@ public class TestCheckpoint extends Test
       cluster.waitActive();
       fs = (DistributedFileSystem)(cluster.getFileSystem());
       checkFile(fs, file, replication);
-      fc = FileContext.getFileContext(cluster.getURI());
+      fc = FileContext.getFileContext(cluster.getURI(0));
       assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
     } finally {
       if(fs != null) fs.close();
@@ -801,6 +803,85 @@ public class TestCheckpoint extends Test
     }
   }
   
+  /** Test that CheckpointSignature validation rejects manipulated ids. */
+  @SuppressWarnings("deprecation")
+  public void testCheckpointSignature() throws IOException {
+
+    MiniDFSCluster cluster = null;
+    Configuration conf = new HdfsConfiguration();
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+        .format(false).build();
+    NameNode nn = cluster.getNameNode();
+
+    SecondaryNameNode secondary = startSecondaryNameNode(conf);
+    // prepare checkpoint image
+    secondary.doCheckpoint();
+    CheckpointSignature sig = nn.rollEditLog();
+    // manipulate the CheckpointSignature fields
+    sig.setBlockpoolID("somerandomebpid");
+    sig.clusterID = "somerandomcid";
+    try {
+      sig.validateStorageInfo(nn.getFSImage()); // this should fail
+      assertTrue("This test is expected to fail.", false);
+    } catch (Exception ignored) {
+    }
+
+    secondary.shutdown();
+    cluster.shutdown();
+  }
+  
+  /**
+   * Starts two namenodes and two secondary namenodes, verifies that secondary
+   * namenodes are configured correctly to talk to their respective namenodes
+   * and can do the checkpoint.
+   * 
+   * @throws IOException
+   */
+  @SuppressWarnings("deprecation")
+  public void testMultipleSecondaryNamenodes() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    String nameserviceId1 = "ns1";
+    String nameserviceId2 = "ns2";
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
+        + "," + nameserviceId2);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2)
+        .nameNodePort(9928).build();
+    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
+    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
+    InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).rpcAddress;
+    InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).rpcAddress;
+    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
+    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
+
+    // Set the generic service RPC address to empty to make sure the
+    // nameservice-specific settings below take effect
+    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+
+    // Set the nameserviceIds
+    snConf1.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1);
+    snConf2.set(DFSUtil.getNameServiceIdKey(
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2);
+
+    SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1);
+    SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2);
+
+    // make sure each secondary namenode is talking to the correct namenode.
+    assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
+    assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
+    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
+        .getNameNodeAddress().getPort());
+
+    // both should checkpoint.
+    secondary1.doCheckpoint();
+    secondary2.doCheckpoint();
+    secondary1.shutdown();
+    secondary2.shutdown();
+    cluster.shutdown();
+  }
+  
   /**
    * Simulate a secondary node failure to transfer image
    * back to the name-node.
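
[Reviewer note] The new testMultipleSecondaryNamenodes exercises the per-nameservice configuration scheme: a generic key is qualified with a nameservice id via DFSUtil.getNameServiceIdKey, so each secondary namenode resolves the address of its own namenode. A sketch of just that qualification step (the method name forNameservice is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    class NameserviceConfSketch {
      static Configuration forNameservice(Configuration base, String nsId,
          String nnHostPort) {
        Configuration conf = new HdfsConfiguration(base);
        // Blank the generic key so the nameservice-qualified one must win.
        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId),
            nnHostPort);
        return conf;
      }
    }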

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Thu May  5 05:40:07 2011
@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.junit.Test;
 
 /** A JUnit test for corrupt_files.jsp */
@@ -80,9 +82,8 @@ public class TestCorruptFilesJsp  {
 
       // Now corrupt all the files except for the last one
       for (int idx = 0; idx < filepaths.length - 1; idx++) {
-        String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx])
-            .getBlockName();
-        assertTrue(cluster.corruptReplica(blockName, 0));
+        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepaths[idx]);
+        assertTrue(TestDatanodeBlockScanner.corruptReplica(blk, 0));
 
         // read the file so that the corrupt block is reported to NN
         FSDataInputStream in = fs.open(filepaths[idx]);
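
[Reviewer note] Beyond the helper swap, the read in this loop is what actually surfaces the corruption: the client hits a checksum mismatch and reports the corrupt block to the namenode. A sketch of that trigger (the class and method names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class TriggerCorruptReportSketch {
      static void readToReport(FileSystem fs, Path path) throws IOException {
        FSDataInputStream in = fs.open(path);
        try {
          in.readByte();  // a checksum failure typically surfaces here
        } catch (IOException expected) {
          // a ChecksumException (an IOException) is the expected outcome
        } finally {
          in.close();
        }
      }
    }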

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java Thu May  5 05:40:07 2011
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 
 import junit.framework.TestCase;
 
@@ -43,10 +42,10 @@ public class TestDatanodeDescriptor exte
       blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
     }
     dd.addBlocksToBeInvalidated(blockList);
-    BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
-    assertEquals(bc.getBlocks().length, MAX_LIMIT);
+    Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
+    assertEquals(bc.length, MAX_LIMIT);
     bc = dd.getInvalidateBlocks(MAX_LIMIT);
-    assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
+    assertEquals(bc.length, REMAINING_BLOCKS);
   }
   
   public void testBlocksCounter() throws Exception {
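
[Reviewer note] The change above reflects a signature change: DatanodeDescriptor.getInvalidateBlocks now returns the Block[] batch directly instead of wrapping it in a BlockCommand. A stand-in sketch of the batching behavior the assertions rely on (drain is a hypothetical helper, not the real implementation):

    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.Block;

    class InvalidateBatchSketch {
      // Hand back at most 'limit' pending blocks per call; a second call
      // returns the remainder, matching the two assertions in the test.
      static Block[] drain(List<Block> pending, int limit) {
        int n = Math.min(limit, pending.size());
        Block[] batch = new Block[n];
        for (int i = 0; i < n; i++) {
          batch[i] = pending.remove(pending.size() - 1);
        }
        return batch;
      }
    }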

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Thu May  5 05:40:07 2011
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -86,10 +87,12 @@ public class TestDeadDatanode {
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
 
+    String poolId = cluster.getNamesystem().getBlockPoolId();
     // wait for datanode to be marked live
     DataNode dn = cluster.getDataNodes().get(0);
-    DatanodeRegistration reg = cluster.getDataNodes().get(0)
-        .getDatanodeRegistration();
+    DatanodeRegistration reg = 
+      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
+      
     waitForDatanodeState(reg.getStorageID(), true, 20000);
 
     // Shutdown and wait for datanode to be marked dead
@@ -97,13 +100,13 @@ public class TestDeadDatanode {
     waitForDatanodeState(reg.getStorageID(), false, 20000);
 
     DatanodeProtocol dnp = cluster.getNameNode();
-    Block block = new Block(0);
-    Block[] blocks = new Block[] { block };
+    
+    Block[] blocks = new Block[] { new Block(0) };
     String[] delHints = new String[] { "" };
     
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
-      dnp.blockReceived(reg, blocks, delHints);
+      dnp.blockReceived(reg, poolId, blocks, delHints);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
@@ -112,7 +115,7 @@ public class TestDeadDatanode {
     // Ensure blockReport from dead datanode is rejected with IOException
     long[] blockReport = new long[] { 0L, 0L, 0L };
     try {
-      dnp.blockReport(reg, blockReport);
+      dnp.blockReport(reg, poolId, blockReport);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
@@ -120,7 +123,7 @@ public class TestDeadDatanode {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0);
+    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
     Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
         .getAction());
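
[Reviewer note] All three namenode-facing datanode calls in this test now carry the block pool id, and the registration itself is obtained per block pool through DataNodeTestUtils. A sketch of the pool-qualified calls, with signatures as they appear in the diff (reportOneBlock is an illustrative wrapper):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    class PoolQualifiedCallsSketch {
      static void reportOneBlock(DatanodeProtocol dnp,
          DatanodeRegistration reg, String poolId) throws IOException {
        Block[] blocks = new Block[] { new Block(0) };
        String[] delHints = new String[] { "" };
        // From a dead datanode both calls are rejected with IOException.
        dnp.blockReceived(reg, poolId, blocks, delHints);
        dnp.blockReport(reg, poolId, new long[] { 0L, 0L, 0L });
      }
    }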

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu May  5 05:40:07 2011
@@ -177,7 +177,7 @@ public class TestEditLog extends TestCas
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
   
-      for (Iterator<URI> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
+      for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
         File dir = new File(it.next().getPath());
         System.out.println(dir);
       }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Thu May  5 05:40:07 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
 import org.junit.Test;
@@ -333,7 +334,7 @@ public class TestEditLogRace {
   public void testSaveImageWhileSyncInProgress() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    NameNode.format(conf);
+    GenericTestUtils.formatNamenode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 
     try {
@@ -425,7 +426,7 @@ public class TestEditLogRace {
   public void testSaveRightBeforeSync() throws Exception {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
-    NameNode.format(conf);
+    GenericTestUtils.formatNamenode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 
     try {
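
[Reviewer note] These tests now bootstrap a bare FSNamesystem through the branch test helper GenericTestUtils.formatNamenode rather than NameNode.format. The sequence below is assembled from the lines above; the import path for NamenodeRole is assumed from this branch's layout:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.apache.hadoop.test.GenericTestUtils;

    class NamesystemBootstrapSketch {
      static FSNamesystem bootstrap() throws Exception {
        Configuration conf = new HdfsConfiguration();
        NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
        GenericTestUtils.formatNamenode(conf);  // replaces NameNode.format
        return new FSNamesystem(conf);
      }
    }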

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1099687&r1=1099686&r2=1099687&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu May  5 05:40:07 2011
@@ -28,7 +28,6 @@ import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
 import java.util.Random;
 import java.util.regex.Pattern;
 
@@ -45,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -247,15 +247,11 @@ public class TestFsck extends TestCase {
       String[] fileNames = util.getFileNames(topDir);
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                           cluster.getNameNodePort()), conf);
-      String block = dfsClient.getNamenode().
-                      getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
-                      get(0).getBlock().getBlockName();
-      File baseDir = new File(System.getProperty("test.build.data",
-                                                 "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File blockFile = new File(baseDir, "data" +(i+1) + 
-            MiniDFSCluster.FINALIZED_DIR_NAME + block);
-        if(blockFile.exists()) {
+      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
+          fileNames[0], 0, Long.MAX_VALUE).get(0).getBlock();
+      for (int i=0; i<4; i++) {
+        File blockFile = MiniDFSCluster.getBlockFile(i, block);
+        if(blockFile != null && blockFile.exists()) {
           assertTrue(blockFile.delete());
         }
       }
@@ -355,7 +351,7 @@ public class TestFsck extends TestCase {
     DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
     // Wait until file replication has completed
     DFSTestUtil.waitReplication(fs, file1, (short)3);
-    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
 
     // Make sure filesystem is in healthy state
     outStr = runFsck(conf, 0, true, "/");
@@ -363,12 +359,9 @@ public class TestFsck extends TestCase {
     assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
     // corrupt replicas 
-    File baseDir = new File(System.getProperty("test.build.data",
-                                               "build/test/data"),"dfs/data");
-    for (int i=0; i < 6; i++) {
-      File blockFile = new File(baseDir, "data" + (i+1) + 
-          MiniDFSCluster.FINALIZED_DIR_NAME + block);
-      if (blockFile.exists()) {
+    for (int i=0; i < 3; i++) {
+      File blockFile = MiniDFSCluster.getBlockFile(i, block);
+      if (blockFile != null && blockFile.exists()) {
         RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
         FileChannel channel = raFile.getChannel();
         String badString = "BADBAD";
@@ -470,19 +463,21 @@ public class TestFsck extends TestCase {
       System.out.println("1. good fsck out: " + outStr);
       assertTrue(outStr.contains("has 0 CORRUPT files"));
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-      "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      for (int i=0; i<4; i++) {
+        for (int j=0; j<=1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null)
             continue;
+  
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            assertTrue("Cannot remove file.", blocks[idx].delete());
           }
-          assertTrue("Cannot remove file.", blocks[idx].delete());
         }
       }
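
[Reviewer note] The rewritten deletion loop reflects the per-block-pool on-disk layout: each datanode storage directory holds a finalized directory keyed by block pool id, resolved through MiniDFSCluster helpers rather than hand-built "data1..dataN" paths. A sketch over the same helpers (the datanode and volume counts mirror the loop above):

    import java.io.File;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class FinalizedDirSketch {
      static void deleteBlockFiles(String bpid) {
        for (int dn = 0; dn < 4; dn++) {
          for (int vol = 0; vol <= 1; vol++) {   // two volumes per datanode
            File storageDir = MiniDFSCluster.getStorageDir(dn, vol);
            File finalized = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            File[] blocks = finalized.listFiles();
            if (blocks == null) continue;
            for (File f : blocks) {
              if (f.getName().startsWith("blk_") && !f.delete()) {
                throw new AssertionError("Cannot remove file: " + f);
              }
            }
          }
        }
      }
    }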
 


