hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1556076 [8/8] - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java...
Date: Mon, 06 Jan 2014 23:58:36 GMT
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Mon Jan  6 23:58:33 2014
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -25,6 +27,7 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeoutException;
@@ -88,7 +91,7 @@ public class TestBlockReport {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  Random rand = new Random(RAND_LIMIT);
+  private static Random rand = new Random(RAND_LIMIT);
 
   private static Configuration conf;
 
@@ -112,6 +115,48 @@ public class TestBlockReport {
     cluster.shutdown();
   }
 
+  // Generate a block report, optionally corrupting the generation
+  // stamp and/or length of one block.
+  private static StorageBlockReport[] getBlockReports(
+      DataNode dn, String bpid, boolean corruptOneBlockGs,
+      boolean corruptOneBlockLen) {
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
+        dn.getFSDataset().getBlockReports(bpid);
+
+    // Send block report
+    StorageBlockReport[] reports =
+        new StorageBlockReport[perVolumeBlockLists.size()];
+    boolean corruptedGs = false;
+    boolean corruptedLen = false;
+
+    int reportIndex = 0;
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
+      BlockListAsLongs blockList = kvPair.getValue();
+
+      // Walk the list of blocks until we find one block each to corrupt
+      // the generation stamp and the length, if so requested.
+      for (int i = 0; i < blockList.getNumberOfBlocks(); ++i) {
+        if (corruptOneBlockGs && !corruptedGs) {
+          blockList.corruptBlockGSForTesting(i, rand);
+          LOG.info("Corrupted the GS for block ID " + i);
+          corruptedGs = true;
+        } else if (corruptOneBlockLen && !corruptedLen) {
+          blockList.corruptBlockLengthForTesting(i, rand);
+          LOG.info("Corrupted the length for block ID " + i);
+          corruptedLen = true;
+        } else {
+          break;
+        }
+      }
+
+      reports[reportIndex++] =
+          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
+    }
+
+    return reports;
+  }
+
  /**
   * Test writes a file, verifies it, and closes it. Then the lengths of the
   * blocks are messed up and a block report is forced.
@@ -152,10 +197,8 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
 
     List<LocatedBlock> blocksAfterReport =
       DFSTestUtil.getAllBlocks(fs.open(filePath));
@@ -210,7 +253,6 @@ public class TestBlockReport {
     for (Integer aRemovedIndex : removedIndex) {
       blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
     }
-    ArrayList<Block> blocks = locatedToBlocks(lBlocks, removedIndex);
 
     if(LOG.isDebugEnabled()) {
       LOG.debug("Number of blocks allocated " + lBlocks.size());
@@ -224,8 +266,11 @@ public class TestBlockReport {
       for (File f : findAllFiles(dataDir,
         new MyFileFilter(b.getBlockName(), true))) {
         DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
-        if (!f.delete())
+        if (!f.delete()) {
           LOG.warn("Couldn't delete " + b.getBlockName());
+        } else {
+          LOG.debug("Deleted file " + f.toString());
+        }
       }
     }
 
@@ -234,10 +279,8 @@ public class TestBlockReport {
     // all blocks belong to the same file, hence same BP
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+    StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
 
     BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
         .getBlockManager());
@@ -252,9 +295,8 @@ public class TestBlockReport {
 
 
   /**
-   * Test writes a file and closes it. Then test finds a block
-   * and changes its GS to be < of original one.
-   * New empty block is added to the list of blocks.
+   * Test writes a file and closes it.
+   * A block report is generated with a bad GS for a single block.
    * Block report is forced and the check for # of corrupted blocks is performed.
    *
    * @throws IOException in case of an error
@@ -263,50 +305,64 @@ public class TestBlockReport {
   public void blockReport_03() throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
-
-    ArrayList<Block> blocks =
-      prepareForRide(filePath, METHOD_NAME, FILE_SIZE);
-
-    // The block with modified GS won't be found. Has to be deleted
-    blocks.get(0).setGenerationStamp(rand.nextLong());
-    // This new block is unknown to NN and will be mark for deletion.
-    blocks.add(new Block());
+    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
     
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
     DatanodeCommand dnCmd =
-      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
     if(LOG.isDebugEnabled()) {
       LOG.debug("Got the command: " + dnCmd);
     }
     printStats();
 
-    assertEquals("Wrong number of CorruptedReplica+PendingDeletion " +
-      "blocks is found", 2,
-        cluster.getNamesystem().getCorruptReplicaBlocks() +
-        cluster.getNamesystem().getPendingDeletionBlocks());
+    assertThat("Wrong number of corrupt blocks",
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
+    assertThat("Wrong number of PendingDeletion blocks",
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
   }
 
   /**
-   * This test isn't a representative case for BlockReport
-   * The empty method is going to be left here to keep the naming
-   * of the test plan in synch with the actual implementation
+   * Test writes a file and closes it.
+   * A block report is generated with an extra block.
+   * The block report is forced and the check for # of pending deletion
+   * blocks is performed.
+   *
+   * @throws IOException in case of an error
    */
-  public void blockReport_04() {
-  }
+  @Test
+  public void blockReport_04() throws IOException {
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    Path filePath = new Path("/" + METHOD_NAME + ".dat");
+    DFSTestUtil.createFile(fs, filePath,
+                           FILE_SIZE, REPL_FACTOR, rand.nextLong());
+
 
-  // Client requests new block from NN. The test corrupts this very block
-  // and forces new block report.
-  // The test case isn't specific for BlockReport because it relies on
-  // BlockScanner which is out of scope of this test
-  // Keeping the name to be in synch with the test plan
-  //
-  public void blockReport_05() {
+    DataNode dn = cluster.getDataNodes().get(DN_N0);
+    // all blocks belong to the same file, hence same BP
+    String poolId = cluster.getNamesystem().getBlockPoolId();
+
+    // Create a bogus new block which will not be present on the namenode.
+    ExtendedBlock b = new ExtendedBlock(
+        poolId, rand.nextLong(), 1024L, rand.nextLong());
+    dn.getFSDataset().createRbw(b);
+
+    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
+    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
+    DatanodeCommand dnCmd =
+        cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Got the command: " + dnCmd);
+    }
+    printStats();
+
+    assertThat("Wrong number of corrupt blocks",
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
+    assertThat("Wrong number of PendingDeletion blocks",
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
   }
 
   /**
@@ -323,17 +379,15 @@ public class TestBlockReport {
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
 
-    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
+    writeFile(METHOD_NAME, FILE_SIZE, filePath);
     startDNandWait(filePath, true);
 
- // all blocks belong to the same file, hence same BP
+    // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
     printStats();
     assertEquals("Wrong number of PendingReplication Blocks",
       0, cluster.getNamesystem().getUnderReplicatedBlocks());
@@ -353,68 +407,40 @@ public class TestBlockReport {
    * @throws IOException in case of an error
    */
   @Test
-  // Currently this test is failing as expected 'cause the correct behavior is
-  // not yet implemented (9/15/09)
   public void blockReport_07() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
 
     // write file and start second node to be "older" than the original
-    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
+    writeFile(METHOD_NAME, FILE_SIZE, filePath);
     startDNandWait(filePath, true);
 
-    int randIndex = rand.nextInt(blocks.size());
-    // Get a block and screw its GS
-    Block corruptedBlock = blocks.get(randIndex);
-    String secondNode = cluster.getDataNodes().get(DN_N1).getStorageId();
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Working with " + secondNode);
-      LOG.debug("BlockGS before " + blocks.get(randIndex).getGenerationStamp());
-    }
-    corruptBlockGS(corruptedBlock);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("BlockGS after " + blocks.get(randIndex).getGenerationStamp());
-      LOG.debug("Done corrupting GS of " + corruptedBlock.getBlockName());
-    }
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+    StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
     printStats();
-    assertEquals("Wrong number of Corrupted blocks",
-      1, cluster.getNamesystem().getCorruptReplicaBlocks() +
-// the following might have to be added into the equation if 
-// the same block could be in two different states at the same time
-// and then the expected number of has to be changed to '2'        
-//        cluster.getNamesystem().getPendingReplicationBlocks() +
-        cluster.getNamesystem().getPendingDeletionBlocks());
-
-    // Get another block and screw its length to be less than original
-    if (randIndex == 0)
-      randIndex++;
-    else
-      randIndex--;
-    corruptedBlock = blocks.get(randIndex);
-    corruptBlockLen(corruptedBlock);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
-    }
-    
-    report[0] = new StorageBlockReport(
-        new DatanodeStorage(dnR.getStorageID()),
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
+
+    assertThat("Wrong number of corrupt blocks",
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
+    assertThat("Wrong number of PendingDeletion blocks",
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+    assertThat("Wrong number of PendingReplication blocks",
+               cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
+
+    reports = getBlockReports(dn, poolId, true, true);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
     printStats();
 
-    assertEquals("Wrong number of Corrupted blocks",
-      2, cluster.getNamesystem().getCorruptReplicaBlocks() +
-        cluster.getNamesystem().getPendingReplicationBlocks() +
-        cluster.getNamesystem().getPendingDeletionBlocks());
+    assertThat("Wrong number of corrupt blocks",
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(2L));
+    assertThat("Wrong number of PendingDeletion blocks",
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+    assertThat("Wrong number of PendingReplication blocks",
+               cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 
     printStats();
 
@@ -457,9 +483,7 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      StorageBlockReport[] report = { new StorageBlockReport(
-          new DatanodeStorage(dnR.getStorageID()),
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      StorageBlockReport[] report = getBlockReports(dn, poolId, false, false);
       cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
@@ -490,14 +514,11 @@ public class TestBlockReport {
     // write file and start second node to be "older" than the original
 
     try {
-      ArrayList<Block> blocks =
-        writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
+      writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
 
       Block bl = findBlock(filePath, 12 * bytesChkSum);
       BlockChecker bc = new BlockChecker(filePath);
       bc.start();
-      corruptBlockGS(bl);
-      corruptBlockLen(bl);
 
       waitForTempReplica(bl, DN_N1);
                                                 
@@ -505,9 +526,7 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      StorageBlockReport[] report = { new StorageBlockReport(
-          new DatanodeStorage(dnR.getStorageID()),
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      StorageBlockReport[] report = getBlockReports(dn, poolId, true, true);
       cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
@@ -783,38 +802,6 @@ public class TestBlockReport {
     ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  private void corruptBlockLen(final Block block)
-    throws IOException {
-    if (block == null) {
-      throw new IOException("Block isn't suppose to be null");
-    }
-    long oldLen = block.getNumBytes();
-    long newLen = oldLen - rand.nextLong();
-    assertTrue("Old and new length shouldn't be the same",
-      block.getNumBytes() != newLen);
-    block.setNumBytes(newLen);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Length of " + block.getBlockName() +
-          " is changed to " + newLen + " from " + oldLen);
-    }
-  }
-
-  private void corruptBlockGS(final Block block)
-    throws IOException {
-    if (block == null) {
-      throw new IOException("Block isn't suppose to be null");
-    }
-    long oldGS = block.getGenerationStamp();
-    long newGS = oldGS - rand.nextLong();
-    assertTrue("Old and new GS shouldn't be the same",
-      block.getGenerationStamp() != newGS);
-    block.setGenerationStamp(newGS);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Generation stamp of " + block.getBlockName() +
-          " is changed to " + block.getGenerationStamp() + " from " + oldGS);
-    }
-  }
-
   private Block findBlock(Path path, long size) throws IOException {
     Block ret;
       List<LocatedBlock> lbs =

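The refactoring above replaces the hand-built, single-storage block reports with one StorageBlockReport per volume, keyed by the DatanodeStorage entries that the new getBlockReports helper pulls from getBlockReports(bpid). A minimal, self-contained sketch of that per-volume shape (Storage and Report below are illustrative stand-ins, not the real Hadoop types):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PerVolumeReports {
      // Stand-ins for DatanodeStorage / StorageBlockReport.
      static class Storage {
        final String id;
        Storage(String id) { this.id = id; }
      }
      static class Report {
        final Storage storage; final long[] blocksAsLongs;
        Report(Storage s, long[] b) { storage = s; blocksAsLongs = b; }
      }

      // One report entry per storage volume, in map iteration order.
      static Report[] buildReports(Map<Storage, long[]> perVolume) {
        Report[] reports = new Report[perVolume.size()];
        int i = 0;
        for (Map.Entry<Storage, long[]> e : perVolume.entrySet()) {
          reports[i++] = new Report(e.getKey(), e.getValue());
        }
        return reports;
      }

      public static void main(String[] args) {
        Map<Storage, long[]> perVolume = new LinkedHashMap<Storage, long[]>();
        perVolume.put(new Storage("DS-1"), new long[] { 11L, 12L });
        perVolume.put(new Storage("DS-2"), new long[] { 21L });
        System.out.println(buildReports(perVolume).length); // prints 2
      }
    }

The hamcrest-style assertions (assertThat(..., is(1L))) that replace the summed assertEquals checks verify the same state, but a failure now names the individual counter that is off instead of an opaque sum.
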
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java Mon Jan  6 23:58:33 2014
@@ -19,12 +19,14 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.*;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
+import java.util.*;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
@@ -34,19 +36,69 @@ import org.apache.hadoop.hdfs.server.dat
 
 public class TestDataDirs {
 
-  @Test (timeout = 10000)
-  public void testGetDataDirsFromURIs() throws Throwable {
+  @Test (timeout = 30000)
+  public void testDataDirParsing() throws Throwable {
+    Configuration conf = new Configuration();
+    List<StorageLocation> locations;
+    File dir0 = new File("/dir0");
+    File dir1 = new File("/dir1");
+    File dir2 = new File("/dir2");
+    File dir3 = new File("/dir3");
+
+    // Verify that a valid string is correctly parsed, and that storage
+    // type is not case-sensitive
+    String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3";
+    conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
+    locations = DataNode.getStorageLocations(conf);
+    assertThat(locations.size(), is(4));
+    assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(0).getUri(), is(dir0.toURI()));
+    assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(1).getUri(), is(dir1.toURI()));
+    assertThat(locations.get(2).getStorageType(), is(StorageType.SSD));
+    assertThat(locations.get(2).getUri(), is(dir2.toURI()));
+    assertThat(locations.get(3).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(3).getUri(), is(dir3.toURI()));
+
+    // Verify that an unrecognized storage type results in an exception.
+    String locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
+    conf.set(DFS_DATANODE_DATA_DIR_KEY, locations2);
+    try {
+      locations = DataNode.getStorageLocations(conf);
+      fail();
+    } catch(IllegalArgumentException iae) {
+      DataNode.LOG.info("The exception is expected.", iae);
+    }
+
+    // Assert that a string with no storage type specified is
+    // correctly parsed and the default storage type is picked up.
+    String locations3 = "/dir0,/dir1";
+    conf.set(DFS_DATANODE_DATA_DIR_KEY, locations3);
+    locations = DataNode.getStorageLocations(conf);
+    assertThat(locations.size(), is(2));
+    assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(0).getUri(), is(dir0.toURI()));
+    assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(1).getUri(), is(dir1.toURI()));
+  }
+
+  @Test (timeout = 30000)
+  public void testDataDirValidation() throws Throwable {
     
     DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
     doThrow(new IOException()).doThrow(new IOException()).doNothing()
       .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
     LocalFileSystem fs = mock(LocalFileSystem.class);
-    Collection<URI> uris = Arrays.asList(new URI("file:/p1/"),
-        new URI("file:/p2/"), new URI("file:/p3/"));
+    AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();
 
-    List<File> dirs = DataNode.getDataDirsFromURIs(uris, fs, diskChecker);
-    assertEquals("number of valid data dirs", 1, dirs.size());
-    String validDir = dirs.iterator().next().getPath();
-    assertEquals("p3 should be valid", new File("/p3").getPath(), validDir);
+    locations.add(StorageLocation.parse("file:/p1/"));
+    locations.add(StorageLocation.parse("file:/p2/"));
+    locations.add(StorageLocation.parse("file:/p3/"));
+
+    List<StorageLocation> checkedLocations =
+        DataNode.checkStorageLocations(locations, fs, diskChecker);
+    assertEquals("number of valid data dirs", 1, checkedLocations.size());
+    String validDir = checkedLocations.iterator().next().getFile().getPath();
+    assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
   }
 }

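The new testDataDirParsing above exercises the "[storagetype]/path" syntax for dfs.datanode.data.dir. A rough, self-contained sketch of the observable parsing rules (StorageLocation.parse is the real implementation; this stand-in only mirrors what the test asserts: an optional, case-insensitive [TYPE] prefix that defaults to DISK and rejects unknown types):

    import java.util.AbstractMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StorageLocationParseSketch {
      enum StorageType { DISK, SSD }

      // Unknown types fail with IllegalArgumentException (thrown by
      // StorageType.valueOf), matching the failure the test expects.
      static Map.Entry<StorageType, String> parse(String raw) {
        Matcher m = Pattern.compile("^\\[(\\w+)\\](.+)$").matcher(raw.trim());
        if (m.matches()) {
          StorageType type = StorageType.valueOf(m.group(1).toUpperCase());
          return new AbstractMap.SimpleEntry<StorageType, String>(type, m.group(2));
        }
        // No prefix: fall back to the default storage type.
        return new AbstractMap.SimpleEntry<StorageType, String>(
            StorageType.DISK, raw.trim());
      }

      public static void main(String[] args) {
        for (String entry : "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,/dir3".split(",")) {
          System.out.println(parse(entry)); // DISK=/dir0 ... SSD=/dir2 ... DISK=/dir3
        }
      }
    }
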
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Mon Jan  6 23:58:33 2014
@@ -163,7 +163,7 @@ public class TestDataNodeMultipleRegistr
 
       for (BPOfferService bpos : dn.getAllBpOs()) {
         LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
-            + bpos.bpRegistration.getStorageID() + "; nna=" +
+            + bpos.bpRegistration.getDatanodeUuid() + "; nna=" +
             getNNSocketAddress(bpos));
       }
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Mon Jan  6 23:58:33 2014
@@ -42,11 +42,13 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -151,13 +153,23 @@ public class TestDataNodeVolumeFailure {
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
-    final StorageBlockReport[] report = {
-        new StorageBlockReport(
-            new DatanodeStorage(dnR.getStorageID()),
-            DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
-                ).getBlockListAsLongs())
-    };
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
+    
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
+        dn.getFSDataset().getBlockReports(bpid);
+
+    // Send block report
+    StorageBlockReport[] reports =
+        new StorageBlockReport[perVolumeBlockLists.size()];
+
+    int reportIndex = 0;
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
+      BlockListAsLongs blockList = kvPair.getValue();
+      reports[reportIndex++] =
+          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
+    }
+    
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);
 
     // verify number of blocks and files...
     verify(filename, filesize);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Mon Jan  6 23:58:33 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -406,6 +407,16 @@ public class TestDirectoryScanner {
     public File getFinalizedDir(String bpid) throws IOException {
       return new File("/base/current/" + bpid + "/finalized");
     }
+
+    @Override
+    public StorageType getStorageType() {
+      return StorageType.DEFAULT;
+    }
+
+    @Override
+    public String getStorageID() {
+      return "";
+    }
   }
 
   private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();
@@ -436,7 +447,7 @@ public class TestDirectoryScanner {
   
   void testScanInfoObject(long blockId) throws Exception {
     DirectoryScanner.ScanInfo scanInfo =
-        new DirectoryScanner.ScanInfo(blockId);
+        new DirectoryScanner.ScanInfo(blockId, null, null, null);
     assertEquals(blockId, scanInfo.getBlockId());
     assertNull(scanInfo.getBlockFile());
     assertNull(scanInfo.getMetaFile());

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Mon Jan  6 23:58:33 2014
@@ -311,7 +311,7 @@ public class TestSimulatedFSDataset {
   }
   
   private SimulatedFSDataset getSimulatedFSDataset() {
-    SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, null, conf); 
+    SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf);
     fsdataset.addBlockPool(bpid, conf);
     return fsdataset;
   }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Jan  6 23:58:33 2014
@@ -17,6 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -25,28 +33,40 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.util.*;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-
 /**
  * Main class for a series of name-node benchmarks.
  * 
@@ -909,17 +929,16 @@ public class NNThroughputBenchmark imple
       dnRegistration = new DatanodeRegistration(
           new DatanodeID(DNS.getDefaultIP("default"),
               DNS.getDefaultHost("default", "default"),
-              "", getNodePort(dnIdx),
+              DataNode.generateUuid(), getNodePort(dnIdx),
               DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
-          new DataStorage(nsInfo, ""),
+          new DataStorage(nsInfo),
           new ExportedBlockKeys(), VersionInfo.getVersion());
-      DataNode.setNewStorageID(dnRegistration);
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
       //first block reports
-      storage = new DatanodeStorage(dnRegistration.getStorageID());
+      storage = new DatanodeStorage(dnRegistration.getDatanodeUuid());
       final StorageBlockReport[] reports = {
           new StorageBlockReport(storage,
               new BlockListAsLongs(null, null).getBlockListAsLongs())
@@ -935,7 +954,7 @@ public class NNThroughputBenchmark imple
     void sendHeartbeat() throws IOException {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
-      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+      StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(),
           false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
           rep, 0, 0, 0).getCommands();
@@ -982,7 +1001,7 @@ public class NNThroughputBenchmark imple
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+      StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(),
           false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
           rep, 0, 0, 0).getCommands();
@@ -1012,14 +1031,14 @@ public class NNThroughputBenchmark imple
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
           receivedDNReg = new DatanodeRegistration(dnInfo,
-            new DataStorage(nsInfo, dnInfo.getStorageID()),
+            new DataStorage(nsInfo),
             new ExportedBlockKeys(), VersionInfo.getVersion());
           ReceivedDeletedBlockInfo[] rdBlocks = {
             new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
                   null) };
           StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-              receivedDNReg.getStorageID(), rdBlocks) };
+              receivedDNReg.getDatanodeUuid(), rdBlocks) };
           nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
               .getNamesystem().getBlockPoolId(), report);
         }
@@ -1142,7 +1161,7 @@ public class NNThroughputBenchmark imple
               loc.getBlock().getLocalBlock(),
               ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
           StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-              datanodes[dnIdx].dnRegistration.getStorageID(), rdBlocks) };
+              datanodes[dnIdx].dnRegistration.getDatanodeUuid(), rdBlocks) };
           nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
               .getBlock().getBlockPoolId(), report);
         }

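The NNThroughputBenchmark hunks above track the storage-ID rework: a simulated datanode now supplies its own identifier up front via DataNode.generateUuid() instead of registering with an empty storage ID and having the removed DataNode.setNewStorageID call assign one. A sketch of the idea, assuming the helper is essentially a thin wrapper around java.util.UUID (an assumption, not verified against the source):

    import java.util.UUID;

    public class DatanodeUuidSketch {
      // Hypothetical stand-in for DataNode.generateUuid(); assumed to
      // delegate to java.util.UUID.
      static String generateUuid() {
        return UUID.randomUUID().toString();
      }

      public static void main(String[] args) {
        // Each simulated datanode gets its own identity before registering.
        System.out.println("dnUuid = " + generateUuid());
      }
    }
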
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Mon Jan  6 23:58:33 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
@@ -111,8 +112,9 @@ public class NameNodeAdapter {
 
   public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg,
       DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
-    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(), 
-        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
+    return namesystem.handleHeartbeat(nodeReg,
+        BlockManagerTestUtil.getStorageReportsForDatanode(dd),
+        0, 0, 0);
   }
 
   public static boolean setReplication(final FSNamesystem ns,
@@ -242,3 +244,4 @@ public class NameNodeAdapter {
     return NNStorage.getInProgressEditsFile(sd, startTxId);
   }
 }
+

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java Mon Jan  6 23:58:33 2014
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.Node;
@@ -99,13 +100,13 @@ public class TestAddBlockRetry {
     bmField.setAccessible(true);
     bmField.set(ns, spyBM);
 
-    doAnswer(new Answer<DatanodeDescriptor[]>() {
+    doAnswer(new Answer<DatanodeStorageInfo[]>() {
       @Override
-      public DatanodeDescriptor[] answer(InvocationOnMock invocation)
+      public DatanodeStorageInfo[] answer(InvocationOnMock invocation)
           throws Throwable {
         LOG.info("chooseTarget for " + src);
-        DatanodeDescriptor[] ret =
-            (DatanodeDescriptor[]) invocation.callRealMethod();
+        DatanodeStorageInfo[] ret =
+            (DatanodeStorageInfo[]) invocation.callRealMethod();
         count++;
         if(count == 1) { // run second addBlock()
           LOG.info("Starting second addBlock for " + src);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Mon Jan  6 23:58:33 2014
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
 
@@ -48,7 +48,7 @@ public class TestCommitBlockSynchronizat
       throws IOException {
     Configuration conf = new Configuration();
     FSImage image = new FSImage(conf);
-    DatanodeDescriptor[] targets = new DatanodeDescriptor[0];
+    final DatanodeStorageInfo[] targets = {};
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
     FSNamesystem namesystemSpy = spy(namesystem);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Mon Jan  6 23:58:33 2014
@@ -104,11 +104,11 @@ public class TestDeadDatanode {
     DatanodeRegistration reg = 
       DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
       
-    waitForDatanodeState(reg.getStorageID(), true, 20000);
+    waitForDatanodeState(reg.getDatanodeUuid(), true, 20000);
 
     // Shutdown and wait for datanode to be marked dead
     dn.shutdown();
-    waitForDatanodeState(reg.getStorageID(), false, 20000);
+    waitForDatanodeState(reg.getDatanodeUuid(), false, 20000);
 
     DatanodeProtocol dnp = cluster.getNameNodeRpc();
     
@@ -117,7 +117,7 @@ public class TestDeadDatanode {
         ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
         null) };
     StorageReceivedDeletedBlocks[] storageBlocks = { 
-        new StorageReceivedDeletedBlocks(reg.getStorageID(), blocks) };
+        new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };
     
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
@@ -129,7 +129,7 @@ public class TestDeadDatanode {
 
     // Ensure blockReport from dead datanode is rejected with IOException
     StorageBlockReport[] report = { new StorageBlockReport(
-        new DatanodeStorage(reg.getStorageID()),
+        new DatanodeStorage(reg.getDatanodeUuid()),
         new long[] { 0L, 0L, 0L }) };
     try {
       dnp.blockReport(reg, poolId, report);
@@ -140,7 +140,7 @@ public class TestDeadDatanode {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0,
+    StorageReport[] rep = { new StorageReport(reg.getDatanodeUuid(), false, 0, 0,
         0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands();
     assertEquals(1, cmd.length);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Mon Jan  6 23:58:33 2014
@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks {
   static Log LOG = NameNode.stateChangeLog;
 
   /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFilesCorruptedBlock() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -131,7 +131,7 @@ public class TestListCorruptFileBlocks {
   /**
    * Check that listCorruptFileBlocks works while the namenode is still in safemode.
    */
-  @Test
+  @Test (timeout=300000)
   public void testListCorruptFileBlocksInSafeMode() throws Exception {
     MiniDFSCluster cluster = null;
     Random random = new Random();
@@ -262,7 +262,7 @@ public class TestListCorruptFileBlocks {
   }
   
   // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -372,7 +372,7 @@ public class TestListCorruptFileBlocks {
   /**
    * test listCorruptFileBlocks in DistributedFileSystem
    */
-  @Test
+  @Test (timeout=300000)
   public void testlistCorruptFileBlocksDFS() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -446,7 +446,7 @@ public class TestListCorruptFileBlocks {
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
-  @Test
+  @Test (timeout=300000)
   public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Mon Jan  6 23:58:33 2014
@@ -316,10 +316,11 @@ public class TestNamenodeRetryCache {
     ExtendedBlock oldBlock = new ExtendedBlock();
     ExtendedBlock newBlock = new ExtendedBlock();
     DatanodeID[] newNodes = new DatanodeID[2];
+    String[] newStorages = new String[2];
     
     newCall();
     try {
-      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
       fail("Expect StandbyException from the updatePipeline call");
     } catch (StandbyException e) {
       // expected, since in the beginning both nn are in standby state
@@ -329,7 +330,7 @@ public class TestNamenodeRetryCache {
     
     cluster.transitionToActive(0);
     try {
-      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
     } catch (IOException e) {
       // ignore call should not hang.
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java Mon Jan  6 23:58:33 2014
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -523,16 +524,17 @@ public class TestPipelinesFailover {
       (BlockInfoUnderConstruction)storedBlock;
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
-    DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
-    DatanodeDescriptor expectedPrimary = datanodes[0];
-    long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
-    for (int i = 1; i < datanodes.length; i++) {
-      if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
-        expectedPrimary = datanodes[i];
-        mostRecentLastUpdate = expectedPrimary.getLastUpdate();
+    final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
+    DatanodeStorageInfo expectedPrimary = storages[0];
+    long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
+    for (int i = 1; i < storages.length; i++) {
+      final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
+      if (lastUpdate > mostRecentLastUpdate) {
+        expectedPrimary = storages[i];
+        mostRecentLastUpdate = lastUpdate;
       }
     }
-    return expectedPrimary;
+    return expectedPrimary.getDatanodeDescriptor();
   }
 
   private DistributedFileSystem createFsAsOtherUser(

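The TestPipelinesFailover hunk above ports the expected-primary scan from DatanodeDescriptor[] to DatanodeStorageInfo[], but the selection rule is unchanged: the replica whose datanode heartbeated most recently is expected to lead the synchronization/recovery protocol. The same scan in self-contained form (Node is an illustrative stand-in):

    public class ExpectedPrimary {
      static class Node {
        final String name; final long lastUpdate;
        Node(String name, long lastUpdate) {
          this.name = name; this.lastUpdate = lastUpdate;
        }
      }

      // Linear scan for the node with the most recent heartbeat timestamp.
      static Node mostRecentlyHeartbeated(Node[] nodes) {
        Node best = nodes[0];
        for (int i = 1; i < nodes.length; i++) {
          if (nodes[i].lastUpdate > best.lastUpdate) {
            best = nodes[i];
          }
        }
        return best;
      }

      public static void main(String[] args) {
        Node[] ns = { new Node("dn0", 100L), new Node("dn1", 250L),
                      new Node("dn2", 180L) };
        System.out.println(mostRecentlyHeartbeated(ns).name); // dn1
      }
    }
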
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Mon Jan  6 23:58:33 2014
@@ -707,9 +707,10 @@ public class TestRetryCacheWithHA {
       DatanodeInfo[] newNodes = new DatanodeInfo[2];
       newNodes[0] = nodes[0];
       newNodes[1] = nodes[1];
+      String[] storageIDs = {"s0", "s1"};
       
       client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
-          newBlock, newNodes);
+          newBlock, newNodes, storageIDs);
       out.close();
     }
 
@@ -719,10 +720,10 @@ public class TestRetryCacheWithHA {
           .getNamesystem(0).getFSDirectory().getINode4Write(file).asFile();
       BlockInfoUnderConstruction blkUC = 
           (BlockInfoUnderConstruction) (fileNode.getBlocks())[1];
-      int datanodeNum = blkUC.getExpectedLocations().length;
+      int datanodeNum = blkUC.getExpectedStorageLocations().length;
       for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
         Thread.sleep(1000);
-        datanodeNum = blkUC.getExpectedLocations().length;
+        datanodeNum = blkUC.getExpectedStorageLocations().length;
       }
       return datanodeNum == 2;
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Mon Jan  6 23:58:33 2014
@@ -238,7 +238,7 @@ public class TestNameNodeMetrics {
     cluster.getNamesystem().writeLock();
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
-          "TEST");
+          "STORAGE_ID", "TEST");
     } finally {
       cluster.getNamesystem().writeUnlock();
     }
@@ -281,7 +281,7 @@ public class TestNameNodeMetrics {
     cluster.getNamesystem().writeLock();
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
-          "TEST");
+          "STORAGE_ID", "TEST");
     } finally {
       cluster.getNamesystem().writeUnlock();
     }
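In both hunks, findAndMarkBlockAsCorrupt gains a storage ID argument identifying which storage on the reporting node holds the corrupt replica; "STORAGE_ID" is a dummy value in these tests. The call shape, annotated:

    cluster.getNamesystem().writeLock();  // BlockManager mutation needs the write lock
    try {
      bm.findAndMarkBlockAsCorrupt(
          block.getBlock(),          // the block to mark corrupt
          block.getLocations()[0],   // datanode reported to hold the bad replica
          "STORAGE_ID",              // storage on that node (dummy value here)
          "TEST");                   // free-form reason recorded with the corruption
    } finally {
      cluster.getNamesystem().writeUnlock();
    }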
@@ -443,7 +443,8 @@ public class TestNameNodeMetrics {
     // We have one sync when the cluster starts up, just opening the journal
     assertCounter("SyncsNumOps", 1L, rb);
     // Each datanode reports in when the cluster comes up
-    assertCounter("BlockReportNumOps", (long)DATANODE_COUNT, rb);
+    assertCounter("BlockReportNumOps",
+                  (long)DATANODE_COUNT*MiniDFSCluster.DIRS_PER_DATANODE, rb);
     
     // Sleep for an interval+slop to let the percentiles rollover
     Thread.sleep((PERCENTILES_INTERVAL+1)*1000);

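Block reports are now issued per storage directory rather than per datanode, so the expected operation count scales accordingly. Illustrative arithmetic, with hypothetical values:

    // Hypothetical numbers: 3 datanodes with 2 storage directories each.
    // Every storage sends its own report at startup, so the metric counts
    // DATANODE_COUNT * DIRS_PER_DATANODE ops (3 * 2 = 6), not DATANODE_COUNT.
    long expectedReports = (long) DATANODE_COUNT * MiniDFSCluster.DIRS_PER_DATANODE;
    assertCounter("BlockReportNumOps", expectedReports, rb);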
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1556076&r1=1556075&r2=1556076&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Mon Jan  6 23:58:33 2014
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-47</EDITS_VERSION>
+  <EDITS_VERSION>-49</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1375509063810</EXPIRY_DATE>
-        <KEY>4d47710649039b98</KEY>
+        <EXPIRY_DATE>1388803909637</EXPIRY_DATE>
+        <KEY>02b5879ef37263c7</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1375509063812</EXPIRY_DATE>
-        <KEY>38cbb1d8fd90fcb2</KEY>
+        <EXPIRY_DATE>1388803909659</EXPIRY_DATE>
+        <KEY>ae5f64e833bca20c</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,17 +37,17 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864805</MTIME>
-      <ATIME>1374817864805</ATIME>
+      <MTIME>1388112711695</MTIME>
+      <ATIME>1388112711695</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>8</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -59,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864816</MTIME>
-      <ATIME>1374817864805</ATIME>
+      <MTIME>1388112711779</MTIME>
+      <ATIME>1388112711695</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -78,8 +78,8 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1374817864818</TIMESTAMP>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <TIMESTAMP>1388112711794</TIMESTAMP>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>10</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -89,8 +89,8 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1374817864822</TIMESTAMP>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <TIMESTAMP>1388112711831</TIMESTAMP>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -101,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1374817864825</TIMESTAMP>
+      <TIMESTAMP>1388112711853</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -136,7 +136,7 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>16</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -147,7 +147,7 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -157,7 +157,7 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -169,17 +169,17 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864846</MTIME>
-      <ATIME>1374817864846</ATIME>
+      <MTIME>1388112711885</MTIME>
+      <ATIME>1388112711885</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -191,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864848</MTIME>
-      <ATIME>1374817864846</ATIME>
+      <MTIME>1388112711899</MTIME>
+      <ATIME>1388112711885</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -253,9 +253,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1374817864860</TIMESTAMP>
+      <TIMESTAMP>1388112711919</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>26</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -267,17 +267,17 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864864</MTIME>
-      <ATIME>1374817864864</ATIME>
+      <MTIME>1388112711932</MTIME>
+      <ATIME>1388112711932</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>28</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -388,8 +388,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864927</MTIME>
-      <ATIME>1374817864864</ATIME>
+      <MTIME>1388112712188</MTIME>
+      <ATIME>1388112711932</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -409,7 +409,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -423,17 +423,17 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864929</MTIME>
-      <ATIME>1374817864929</ATIME>
+      <MTIME>1388112712191</MTIME>
+      <ATIME>1388112712191</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -544,8 +544,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864947</MTIME>
-      <ATIME>1374817864929</ATIME>
+      <MTIME>1388112712212</MTIME>
+      <ATIME>1388112712191</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -565,7 +565,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -579,17 +579,17 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864950</MTIME>
-      <ATIME>1374817864950</ATIME>
+      <MTIME>1388112712215</MTIME>
+      <ATIME>1388112712215</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -700,8 +700,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817864966</MTIME>
-      <ATIME>1374817864950</ATIME>
+      <MTIME>1388112712238</MTIME>
+      <ATIME>1388112712215</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -721,7 +721,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -733,12 +733,12 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1374817864967</TIMESTAMP>
+      <TIMESTAMP>1388112712241</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -750,14 +750,14 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1374817864971</MTIME>
-      <ATIME>1374817864971</ATIME>
+      <MTIME>1388112712273</MTIME>
+      <ATIME>1388112712273</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -768,14 +768,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>jing</OWNER>
+        <OWNER>aagarwal</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1374817864974</ISSUE_DATE>
-        <MAX_DATE>1375422664974</MAX_DATE>
+        <ISSUE_DATE>1388112712276</ISSUE_DATE>
+        <MAX_DATE>1388717512276</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1374904264974</EXPIRY_TIME>
+      <EXPIRY_TIME>1388199112276</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -785,14 +785,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>jing</OWNER>
+        <OWNER>aagarwal</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1374817864974</ISSUE_DATE>
-        <MAX_DATE>1375422664974</MAX_DATE>
+        <ISSUE_DATE>1388112712276</ISSUE_DATE>
+        <MAX_DATE>1388717512276</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1374904265012</EXPIRY_TIME>
+      <EXPIRY_TIME>1388199112421</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -802,11 +802,11 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>jing</OWNER>
+        <OWNER>aagarwal</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1374817864974</ISSUE_DATE>
-        <MAX_DATE>1375422664974</MAX_DATE>
+        <ISSUE_DATE>1388112712276</ISSUE_DATE>
+        <MAX_DATE>1388717512276</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
     </DATA>
@@ -819,17 +819,17 @@
       <INODEID>16393</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817865017</MTIME>
-      <ATIME>1374817865017</ATIME>
+      <MTIME>1388112712441</MTIME>
+      <ATIME>1388112712441</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1163912231_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CLIENTID>90461de5-e4ed-45f8-bee4-3b0e4c7979bd</RPC_CLIENTID>
       <RPC_CALLID>69</RPC_CALLID>
     </DATA>
   </RECORD>
@@ -868,7 +868,7 @@
       <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>0</NUM_BYTES>
+        <NUM_BYTES>11</NUM_BYTES>
         <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
       <RPC_CLIENTID></RPC_CLIENTID>
@@ -886,31 +886,47 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>67</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1676409172_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_1163912231_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
+    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
       <TXID>68</TXID>
+      <GENSTAMPV2>1012</GENSTAMPV2>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_REASSIGN_LEASE</OPCODE>
+    <DATA>
+      <TXID>69</TXID>
+      <LEASEHOLDER>HDFS_NameNode</LEASEHOLDER>
+      <PATH>/hard-lease-recovery-test</PATH>
+      <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CLOSE</OPCODE>
+    <DATA>
+      <TXID>70</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1374817867688</MTIME>
-      <ATIME>1374817865017</ATIME>
+      <MTIME>1388112717425</MTIME>
+      <ATIME>1388112712441</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENSTAMP>1011</GENSTAMP>
+        <GENSTAMP>1012</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>aagarwal</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -919,7 +935,7 @@
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>69</TXID>
+      <TXID>71</TXID>
     </DATA>
   </RECORD>
 </EDITS>


