hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1294028 [5/6] - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/...
Date Mon, 27 Feb 2012 04:54:45 GMT
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Mon Feb 27 04:54:33 2012
@@ -51,14 +51,20 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
+import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
@@ -76,6 +82,10 @@ import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
 /** Unit tests for block tokens */
 public class TestBlockToken {
   public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
@@ -96,9 +106,9 @@ public class TestBlockToken {
     ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
   }
-  
+
   /** Directory where we can count our open file descriptors under Linux */
-  static File FD_DIR = new File("/proc/self/fd/");  
+  static File FD_DIR = new File("/proc/self/fd/");
 
   long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
   long blockTokenLifetime = 2 * 60 * 1000; // 2 mins
@@ -106,21 +116,24 @@ public class TestBlockToken {
   ExtendedBlock block2 = new ExtendedBlock("10", 10L);
   ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
 
-  private static class getLengthAnswer implements Answer<Long> {
+  private static class GetLengthAnswer implements
+      Answer<GetReplicaVisibleLengthResponseProto> {
     BlockTokenSecretManager sm;
     BlockTokenIdentifier ident;
 
-    public getLengthAnswer(BlockTokenSecretManager sm,
+    public GetLengthAnswer(BlockTokenSecretManager sm,
         BlockTokenIdentifier ident) {
       this.sm = sm;
       this.ident = ident;
     }
 
     @Override
-    public Long answer(InvocationOnMock invocation) throws IOException {
+    public GetReplicaVisibleLengthResponseProto answer(
+        InvocationOnMock invocation) throws IOException {
       Object args[] = invocation.getArguments();
-      assertEquals(1, args.length);
-      ExtendedBlock block = (ExtendedBlock) args[0];
+      assertEquals(2, args.length);
+      GetReplicaVisibleLengthRequestProto req = 
+          (GetReplicaVisibleLengthRequestProto) args[1];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
       assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
@@ -129,15 +142,18 @@ public class TestBlockToken {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
-        sm.checkAccess(id, null, block, BlockTokenSecretManager.AccessMode.WRITE);
+        sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
+            BlockTokenSecretManager.AccessMode.WRITE);
         result = id.getBlockId();
       }
-      return result;
+      return GetReplicaVisibleLengthResponseProto.newBuilder()
+          .setLength(result).build();
     }
   }
 
   private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
-      ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
+      ExtendedBlock block,
+      EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
       throws IOException {
     Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
     BlockTokenIdentifier id = sm.createIdentifier();
@@ -151,12 +167,12 @@ public class TestBlockToken {
     TestWritable.testWritable(new BlockTokenIdentifier());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(true,
         blockKeyUpdateInterval, blockTokenLifetime);
-    TestWritable.testWritable(generateTokenId(sm, block1, EnumSet
-        .allOf(BlockTokenSecretManager.AccessMode.class)));
-    TestWritable.testWritable(generateTokenId(sm, block2, EnumSet
-        .of(BlockTokenSecretManager.AccessMode.WRITE)));
-    TestWritable.testWritable(generateTokenId(sm, block3, EnumSet
-        .noneOf(BlockTokenSecretManager.AccessMode.class)));
+    TestWritable.testWritable(generateTokenId(sm, block1,
+        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
+    TestWritable.testWritable(generateTokenId(sm, block2,
+        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
+    TestWritable.testWritable(generateTokenId(sm, block3,
+        EnumSet.noneOf(BlockTokenSecretManager.AccessMode.class)));
   }
 
   private void tokenGenerationAndVerification(BlockTokenSecretManager master,
@@ -176,8 +192,8 @@ public class TestBlockToken {
       slave.checkAccess(token2, null, block2, mode);
     }
     // multi-mode tokens
-    Token<BlockTokenIdentifier> mtoken = master.generateToken(block3, EnumSet
-        .allOf(BlockTokenSecretManager.AccessMode.class));
+    Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
+        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
     for (BlockTokenSecretManager.AccessMode mode : BlockTokenSecretManager.AccessMode
         .values()) {
       master.checkAccess(mtoken, null, block3, mode);
@@ -202,25 +218,32 @@ public class TestBlockToken {
     slaveHandler.setKeys(keys);
     tokenGenerationAndVerification(masterHandler, slaveHandler);
   }
-  
+
   private Server createMockDatanode(BlockTokenSecretManager sm,
-      Token<BlockTokenIdentifier> token) throws IOException {
-    ClientDatanodeProtocol mockDN = mock(ClientDatanodeProtocol.class);
+      Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
+    ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
     when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
-        ClientDatanodeProtocol.versionID);
-    doReturn(ProtocolSignature.getProtocolSignature(
-        mockDN, ClientDatanodeProtocol.class.getName(),
-        ClientDatanodeProtocol.versionID, 0))
-      .when(mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
+        RPC.getProtocolVersion(ClientDatanodeProtocolPB.class));
+    doReturn(
+        ProtocolSignature.getProtocolSignature(mockDN,
+            ClientDatanodeProtocolPB.class.getName(),
+            RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), 0)).when(
+        mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
 
     BlockTokenIdentifier id = sm.createIdentifier();
     id.readFields(new DataInputStream(new ByteArrayInputStream(token
         .getIdentifier())));
-    doAnswer(new getLengthAnswer(sm, id)).when(mockDN).getReplicaVisibleLength(
-        any(ExtendedBlock.class));
-
-    return RPC.getServer(ClientDatanodeProtocol.class, mockDN,
-        ADDRESS, 0, 5, true, conf, sm);
+    
+    doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
+        .getReplicaVisibleLength(any(RpcController.class),
+            any(GetReplicaVisibleLengthRequestProto.class));
+
+    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+    BlockingService service = ClientDatanodeProtocolService
+        .newReflectiveBlockingService(mockDN);
+    return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
+        true, conf, sm);
   }
 
   @Test
@@ -241,9 +264,8 @@ public class TestBlockToken {
 
     ClientDatanodeProtocol proxy = null;
     try {
-      proxy = RPC.getProxy(
-          ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, addr,
-          ticket, conf, NetUtils.getDefaultSocketFactory(conf));
+      proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
+          NetUtils.getDefaultSocketFactory(conf));
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
     } finally {
       server.stop();
@@ -255,8 +277,8 @@ public class TestBlockToken {
 
   /**
    * Test that fast repeated invocations of createClientDatanodeProtocolProxy
-   * will not end up using up thousands of sockets. This is a regression test for
-   * HDFS-1965.
+   * will not end up using up thousands of sockets. This is a regression test
+   * for HDFS-1965.
    */
   @Test
   public void testBlockTokenRpcLeak() throws Exception {
@@ -270,9 +292,9 @@ public class TestBlockToken {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
-    
+    DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
+        "fake-storage", 0, addr.getPort());
+
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
     fakeBlock.setBlockToken(token);
@@ -282,19 +304,19 @@ public class TestBlockToken {
     // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
     // actually close the TCP connections to the real target DN.
     ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
-        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, 
+        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
         new InetSocketAddress("1.1.1.1", 1),
-        UserGroupInformation.createRemoteUser("junk"),
-        conf, NetUtils.getDefaultSocketFactory(conf));
-    
+        UserGroupInformation.createRemoteUser("junk"), conf,
+        NetUtils.getDefaultSocketFactory(conf));
+
     ClientDatanodeProtocol proxy = null;
 
     int fdsAtStart = countOpenFileDescriptors();
     try {
       long endTime = System.currentTimeMillis() + 3000;
       while (System.currentTimeMillis() < endTime) {
-        proxy = DFSUtil.createClientDatanodeProtocolProxy(
-            fakeDnId, conf, 1000, fakeBlock);
+        proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
+            fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         if (proxy != null) {
           RPC.stopProxy(proxy);
@@ -303,32 +325,31 @@ public class TestBlockToken {
       }
 
       int fdsAtEnd = countOpenFileDescriptors();
-      
+
       if (fdsAtEnd - fdsAtStart > 50) {
         fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
       }
     } finally {
       server.stop();
     }
-    
+
     RPC.stopProxy(proxyToNoWhere);
   }
 
   /**
-   * @return the current number of file descriptors open by this
-   * process.
+   * @return the current number of file descriptors open by this process.
    */
-  private static int countOpenFileDescriptors() throws IOException {
+  private static int countOpenFileDescriptors() {
     return FD_DIR.list().length;
   }
 
-  /** 
+  /**
    * Test {@link BlockPoolTokenSecretManager}
    */
   @Test
   public void testBlockPoolTokenSecretManager() throws Exception {
     BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
-    
+
     // Test BlockPoolSecretManager with upto 10 block pools
     for (int i = 0; i < 10; i++) {
       String bpid = Integer.toString(i);
@@ -337,12 +358,11 @@ public class TestBlockToken {
       BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(false,
           blockKeyUpdateInterval, blockTokenLifetime);
       bpMgr.addBlockPool(bpid, slaveHandler);
-      
-      
+
       ExportedBlockKeys keys = masterHandler.exportKeys();
       bpMgr.setKeys(bpid, keys);
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
-      
+
       // Test key updating
       masterHandler.updateKeys();
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
@@ -351,11 +371,12 @@ public class TestBlockToken {
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
     }
   }
-  
+
   /**
-   * This test writes a file and gets the block locations without closing
-   * the file, and tests the block token in the last block. Block token is
-   * verified by ensuring it is of correct kind.
+   * This test writes a file and gets the block locations without closing the
+   * file, and tests the block token in the last block. Block token is verified
+   * by ensuring it is of correct kind.
+   * 
    * @throws IOException
    * @throws InterruptedException
    */
@@ -389,5 +410,5 @@ public class TestBlockToken {
     } finally {
       cluster.shutdown();
     }
-  } 
+  }
 }

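The heart of the TestBlockToken change is the move from mocking ClientDatanodeProtocol directly to mocking the protobuf-generated interface ClientDatanodeProtocolPB: PB blocking methods take an RpcController plus a request proto and return a response proto, and the mock is wrapped in a reflective BlockingService before being handed to RPC.getServer. A minimal sketch of that shape, reusing the classes imported in the diff (the fixed length of 42 is illustrative; static org.mockito.Mockito.* imports are assumed, and the enclosing method needs throws ServiceException, as createMockDatanode now declares):

    // Sketch: serve a Mockito mock of a PB protocol over Hadoop RPC.
    ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);

    // Generated blocking methods are (RpcController, RequestProto) ->
    // ResponseProto, so the matchers and the stubbed return are protos.
    when(mockDN.getReplicaVisibleLength(
            any(RpcController.class),
            any(GetReplicaVisibleLengthRequestProto.class)))
        .thenReturn(GetReplicaVisibleLengthResponseProto.newBuilder()
            .setLength(42L).build()); // illustrative value

    // Server and client must agree on ProtobufRpcEngine for this protocol.
    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
        ProtobufRpcEngine.class);
    BlockingService service =
        ClientDatanodeProtocolService.newReflectiveBlockingService(mockDN);
    Server server = RPC.getServer(ClientDatanodeProtocolPB.class, service,
        ADDRESS, 0, 5, true, conf, sm);
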
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java Mon Feb 27 04:54:33 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.test.GenericTestUtils;
 
 import org.junit.Test;
 import static org.junit.Assert.*;
@@ -61,7 +62,8 @@ public class TestDistributedUpgrade {
    * Attempts to start a NameNode with the given operation.  Starting
    * the NameNode should throw an exception.
    */
-  void startNameNodeShouldFail(StartupOption operation) {
+  void startNameNodeShouldFail(StartupOption operation,
+      String exceptionSubstring) {
     try {
       //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
       // we set manage dirs to true as NN has to start from untar'ed image with 
@@ -73,8 +75,8 @@ public class TestDistributedUpgrade {
                                               .build(); // should fail
       throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
-      expected = null;
-      // expected
+      GenericTestUtils.assertExceptionContains(
+          exceptionSubstring, expected);
     }
   }
   
@@ -117,7 +119,7 @@ public class TestDistributedUpgrade {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
 
     log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR);
+    startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version");
 
     log("Start NameNode only distributed upgrade", numDirs);
     // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
@@ -130,10 +132,12 @@ public class TestDistributedUpgrade {
     cluster.shutdown();
 
     log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR);
+    startNameNodeShouldFail(StartupOption.REGULAR,
+        "Previous distributed upgrade was not completed");
 
     log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
-    startNameNodeShouldFail(StartupOption.ROLLBACK);
+    startNameNodeShouldFail(StartupOption.ROLLBACK,
+        "Cannot rollback to storage version -7 using this version");
 
     log("Normal distributed upgrade for the cluster", numDirs);
     cluster = new MiniDFSCluster.Builder(conf)

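TestDistributedUpgrade tightens its failure checks: instead of catching and discarding any exception, each expected startup failure now has to carry a recognizable message. GenericTestUtils.assertExceptionContains behaves roughly like the following paraphrase (not the actual implementation):

    // Paraphrase of the assertion's contract: fail unless the throwable's
    // message mentions the expected substring.
    static void assertExceptionContains(String substring, Throwable t) {
      String msg = t.getMessage();
      if (msg == null || !msg.contains(substring)) {
        throw new AssertionError("Expected to find '" + substring
            + "' but got unexpected exception: " + t);
      }
    }
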
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java Mon Feb 27 04:54:33 2012
@@ -19,9 +19,8 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.mockito.Mockito;
 
 import com.google.common.base.Preconditions;
@@ -57,7 +56,7 @@ public class DataNodeAdapter {
    * the given NameNode. This can be used to delay or wait for
    * RPC calls on the datanode->NN path.
    */
-  public static DatanodeProtocol spyOnBposToNN(
+  public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
       DataNode dn, NameNode nn) {
     String bpid = nn.getNamesystem().getBlockPoolId();
     
@@ -71,12 +70,8 @@ public class DataNodeAdapter {
     Preconditions.checkArgument(bpos != null,
         "No such bpid: %s", bpid);
 
-    // When protobufs are merged, the following can be converted
-    // to a simple spy. Because you can't spy on proxy objects,
-    // we have to use the DelegateAnswer trick.
-    DatanodeProtocol origNN = bpos.getBpNamenode();
-    DatanodeProtocol spy = Mockito.mock(DatanodeProtocol.class,
-        new GenericTestUtils.DelegateAnswer(origNN));
+    DatanodeProtocolClientSideTranslatorPB origNN = bpos.getBpNamenode();
+    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
 
     bpos.setBpNamenode(spy);
     return spy;

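Context for the DataNodeAdapter simplification: Mockito.spy works by subclassing the target, which is impossible for java.lang.reflect dynamic proxies, so the pre-protobuf code faked a spy with a mock whose default answer delegated every call to the real proxy. The client-side PB translator is an ordinary class, so a real spy now suffices. Side by side (origNN as in the diff):

    // Old workaround: a mock that forwards each call to the real proxy,
    // because a dynamic proxy cannot be subclassed by Mockito.
    DatanodeProtocol spy = Mockito.mock(DatanodeProtocol.class,
        new GenericTestUtils.DelegateAnswer(origNN));

    // New form: the translator is a concrete class, so Mockito wraps the
    // live instance directly while still recording and stubbing calls.
    DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
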
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Mon Feb 27 04:54:33 2012
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
@@ -146,8 +147,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
     List<LocatedBlock> blocksAfterReport =
       DFSTestUtil.getAllBlocks(fs.open(filePath));
@@ -180,7 +182,7 @@ public class TestBlockReport {
 
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     DFSTestUtil.createFile(fs, filePath,
-      (long) FILE_SIZE, REPL_FACTOR, rand.nextLong());
+      FILE_SIZE, REPL_FACTOR, rand.nextLong());
 
     // mock around with newly created blocks and delete some
     File dataDir = new File(cluster.getDataDirectory());
@@ -226,8 +228,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
     BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
         .getBlockManager());
@@ -266,9 +269,10 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     DatanodeCommand dnCmd =
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     if(LOG.isDebugEnabled()) {
       LOG.debug("Got the command: " + dnCmd);
     }
@@ -284,9 +288,8 @@ public class TestBlockReport {
    * This test isn't a representative case for BlockReport
    * The empty method is going to be left here to keep the naming
    * of the test plan in synch with the actual implementation
-   * @throws IOException in case of errors
    */
-  public void blockReport_04() throws IOException {
+  public void blockReport_04() {
   }
 
   // Client requests new block from NN. The test corrupts this very block
@@ -295,7 +298,7 @@ public class TestBlockReport {
   // BlockScanner which is out of scope of this test
   // Keeping the name to be in synch with the test plan
   //
-  public void blockReport_05() throws IOException {
+  public void blockReport_05() {
   }
 
   /**
@@ -319,8 +322,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
     assertEquals("Wrong number of PendingReplication Blocks",
       0, cluster.getNamesystem().getUnderReplicatedBlocks());
@@ -368,8 +372,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
     assertEquals("Wrong number of Corrupted blocks",
       1, cluster.getNamesystem().getCorruptReplicaBlocks() +
@@ -390,8 +395,9 @@ public class TestBlockReport {
       LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
     }
     
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
+    report[0] = new StorageBlockReport(dnR.getStorageID(),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
 
     assertEquals("Wrong number of Corrupted blocks",
@@ -440,8 +446,9 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
         blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
@@ -486,8 +493,9 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
         2, cluster.getNamesystem().getPendingReplicationBlocks());
@@ -550,7 +558,7 @@ public class TestBlockReport {
         .when(spy).blockReport(
           Mockito.<DatanodeRegistration>anyObject(),
           Mockito.anyString(),
-          Mockito.<long[]>anyObject());
+          Mockito.<StorageBlockReport[]>anyObject());
       
       // Force a block report to be generated. The block report will have
       // an RBW replica in it. Wait for the RPC to be sent, but block
@@ -638,8 +646,7 @@ public class TestBlockReport {
   // Write file and start second data node.
   private ArrayList<Block> writeFile(final String METHOD_NAME,
                                                final long fileSize,
-                                               Path filePath)
-    throws IOException {
+                                               Path filePath) {
     ArrayList<Block> blocks = null;
     try {
       REPL_FACTOR = 2;

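The change repeated throughout TestBlockReport is mechanical: blockReport now takes an array of per-storage reports instead of one flat long[] per datanode, apparently in preparation for datanodes reporting multiple storages in a single RPC. The call shape, condensed from the hunks above:

    // Before: one flat encoding for the whole datanode.
    long[] longs = new BlockListAsLongs(blocks, null).getBlockListAsLongs();
    cluster.getNameNodeRpc().blockReport(dnR, poolId, longs);

    // After: the same longs, tagged with the storage they belong to.
    StorageBlockReport[] report = { new StorageBlockReport(
        dnR.getStorageID(), longs) };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
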
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Mon Feb 27 04:54:33 2012
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -144,8 +145,9 @@ public class TestDataNodeVolumeFailure {
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
-    long[] bReport = dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs();
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, bReport);
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
 
     // verify number of blocks and files...
     verify(filename, filesize);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java Mon Feb 27 04:54:33 2012
@@ -24,9 +24,8 @@ import static org.junit.Assert.fail;
 import static org.mockito.Mockito.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.junit.Test;
@@ -49,7 +48,8 @@ public class TestDatanodeRegister { 
 
     NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class);
     when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion");
-    DatanodeProtocol fakeDNProt = mock(DatanodeProtocol.class);
+    DatanodeProtocolClientSideTranslatorPB fakeDNProt = 
+        mock(DatanodeProtocolClientSideTranslatorPB.class);
     when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo);
 
     bpos.setNameNode( fakeDNProt );

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Mon Feb 27 04:54:33 2012
@@ -28,6 +28,7 @@ import java.net.SocketTimeoutException;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 
@@ -81,7 +82,7 @@ public class TestInterDatanodeProtocol {
     }
 
     @Override
-    public Writable call(Class<?> protocol, Writable param, long receiveTime)
+    public Writable call(RpcKind rpcKind, String protocol, Writable param, long receiveTime)
         throws IOException {
       if (sleep) {
         // sleep a bit
@@ -149,7 +150,6 @@ public class TestInterDatanodeProtocol {
       DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
       InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
           datanodeinfo[0], conf, datanode.getDnConf().socketTimeout);
-      assertTrue(datanode != null);
       
       //stop block scanner, so we could compare lastScanTime
       if (datanode.blockScanner != null) {
@@ -346,8 +346,8 @@ public class TestInterDatanodeProtocol {
   /** Test to verify that InterDatanode RPC timesout as expected when
    *  the server DN does not respond.
    */
-  @Test
-  public void testInterDNProtocolTimeout() throws Exception {
+  @Test(expected=SocketTimeoutException.class)
+  public void testInterDNProtocolTimeout() throws Throwable {
     final Server server = new TestServer(1, true);
     server.start();
 
@@ -360,10 +360,9 @@ public class TestInterDatanodeProtocol {
     try {
       proxy = DataNode.createInterDataNodeProtocolProxy(
           dInfo, conf, 500);
-      proxy.initReplicaRecovery(null);
+      proxy.initReplicaRecovery(new RecoveringBlock(
+          new ExtendedBlock("bpid", 1), null, 100));
       fail ("Expected SocketTimeoutException exception, but did not get.");
-    } catch (SocketTimeoutException e) {
-      DataNode.LOG.info("Got expected Exception: SocketTimeoutException" + e);
     } finally {
       if (proxy != null) {
         RPC.stopProxy(proxy);

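TestInterDatanodeProtocol now declares the anticipated timeout on the annotation: JUnit 4 passes a @Test(expected=...) method only when it exits by throwing that exception type, which removes the hand-written catch block. A self-contained illustration of the pattern, unrelated to HDFS (java.net and org.junit imports assumed):

    // Passes iff SocketTimeoutException escapes the method.
    @Test(expected = SocketTimeoutException.class)
    public void readTimesOut() throws Exception {
      ServerSocket server = new ServerSocket(0); // never writes anything
      Socket client = new Socket("localhost", server.getLocalPort());
      try {
        client.setSoTimeout(50);            // read() gives up after 50 ms
        client.getInputStream().read();     // -> SocketTimeoutException
      } finally {
        client.close();
        server.close();
      }
    }
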
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Mon Feb 27 04:54:33 2012
@@ -397,12 +397,9 @@ public abstract class FSImageTestUtil {
    */
   public static EditLogFile findLatestEditsLog(StorageDirectory sd)
   throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-      new FSImageTransactionalStorageInspector();
-    inspector.inspectDirectory(sd);
-    
-    List<EditLogFile> foundEditLogs = Lists.newArrayList(
-        inspector.getEditLogFiles());
+    File currentDir = sd.getCurrentDir();
+    List<EditLogFile> foundEditLogs 
+      = Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir.listFiles()));
     return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Feb 27 04:54:33 2012
@@ -25,8 +25,6 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -46,8 +44,13 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -103,7 +106,7 @@ public class NNThroughputBenchmark {
   static NameNode nameNode;
   static NamenodeProtocols nameNodeProto;
 
-  NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
+  NNThroughputBenchmark(Configuration conf) throws IOException {
     config = conf;
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
@@ -124,7 +127,7 @@ public class NNThroughputBenchmark {
     nameNodeProto = nameNode.getRpcServer();
   }
 
-  void close() throws IOException {
+  void close() {
     nameNode.stop();
   }
 
@@ -794,7 +797,10 @@ public class NNThroughputBenchmark {
       dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
-      dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
+      
+      DatanodeStorage[] storages = { new DatanodeStorage(
+          dnRegistration.getStorageID(), DatanodeStorage.State.NORMAL) };
+      dnRegistration = nameNodeProto.registerDatanode(dnRegistration, storages);
     }
 
     /**
@@ -804,8 +810,10 @@ public class NNThroughputBenchmark {
     void sendHeartbeat() throws IOException {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
+      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
+          rep, 0, 0, 0);
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -848,9 +856,10 @@ public class NNThroughputBenchmark {
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      // TODO:FEDERATION currently a single block pool is supported
+      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
+          rep, 0, 0, 0);
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
@@ -880,10 +889,12 @@ public class NNThroughputBenchmark {
           receivedDNReg.setStorageInfo(
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-          nameNodeProto.blockReceived( receivedDNReg, 
-                                  nameNode.getNamesystem().getBlockPoolId(),
-                                  new Block[] {blocks[i]},
-                                  new String[] {DataNode.EMPTY_DEL_HINT});
+          ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
+                  blocks[i], DataNode.EMPTY_DEL_HINT) };
+          StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
+              receivedDNReg.getStorageID(), rdBlocks) };
+          nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
+              .getNamesystem().getBlockPoolId(), report);
         }
       }
       return blocks.length;
@@ -915,7 +926,7 @@ public class NNThroughputBenchmark {
       config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3 * 60);
       parseArguments(args);
       // adjust replication to the number of data-nodes
-      this.replication = (short)Math.min((int)replication, getNumDatanodes());
+      this.replication = (short)Math.min(replication, getNumDatanodes());
     }
 
     /**
@@ -995,11 +1006,12 @@ public class NNThroughputBenchmark {
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
-          nameNodeProto.blockReceived(
-              datanodes[dnIdx].dnRegistration, 
-              loc.getBlock().getBlockPoolId(),
-              new Block[] {loc.getBlock().getLocalBlock()},
-              new String[] {""});
+          ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
+              loc.getBlock().getLocalBlock(), "") };
+          StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
+              datanodes[dnIdx].dnRegistration.getStorageID(), rdBlocks) };
+          nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
+              .getBlock().getBlockPoolId(), report);
         }
       }
       return prevBlock;
@@ -1016,8 +1028,10 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
+      StorageBlockReport[] report = { new StorageBlockReport(
+          dn.dnRegistration.getStorageID(), dn.getBlockReportList()) };
       nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
-          .getBlockPoolId(), dn.getBlockReportList());
+          .getBlockPoolId(), report);
       long end = System.currentTimeMillis();
       return end-start;
     }

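NNThroughputBenchmark picks up the companion signature changes on the datanode-to-NameNode path: sendHeartbeat takes StorageReport[] in place of loose capacity/usage scalars, and blockReceived becomes blockReceivedAndDeleted with per-storage wrappers. Condensed from the hunks above (block and bpid stand for the reported block and block-pool id):

    // Heartbeat, before: scalars inline in the RPC signature.
    nameNodeProto.sendHeartbeat(dnRegistration,
        DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);

    // Heartbeat, after: the same numbers, one StorageReport per storage.
    StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
        false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
    nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0);

    // Block arrival, after: each block wrapped with its delete-hint and storage.
    ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
        block, DataNode.EMPTY_DEL_HINT) };
    StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
        dnRegistration.getStorageID(), rdBlocks) };
    nameNodeProto.blockReceivedAndDeleted(dnRegistration, bpid, report);
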
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Mon Feb 27 04:54:33 2012
@@ -53,7 +53,7 @@ public class NameNodeAdapter {
    * @return rpc server
    */
   public static Server getRpcServer(NameNode namenode) {
-    return ((NameNodeRpcServer)namenode.getRpcServer()).server;
+    return ((NameNodeRpcServer)namenode.getRpcServer()).clientRpcServer;
   }
 
   public static DelegationTokenSecretManager getDtSecretManager(

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Mon Feb 27 04:54:33 2012
@@ -84,8 +84,10 @@ public class TestCheckPointForSecurityTo
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
+        log.validateLog();
+        long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should have 5 transactions",
-            5, log.validateLog().numTransactions);
+                     5, numTransactions);
       }
 
       // Saving image in safe mode should succeed
@@ -99,8 +101,10 @@ public class TestCheckPointForSecurityTo
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
+        log.validateLog();
+        long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should only have START txn",
-            1, log.validateLog().numTransactions);
+            1, numTransactions);
       }
 
       // restart cluster

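In TestCheckPointForSecurityTokens the transaction count is now derived from the log's txid range rather than returned by validateLog(). Both endpoints are inclusive, hence the +1:

    // Inclusive range: a log holding txids first..last has last - first + 1
    // transactions, e.g. txids 1..5 -> (5 - 1) + 1 = 5.
    log.validateLog(); // validate before reading the range, as in the diff
    long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
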
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Feb 27 04:54:33 2012
@@ -1200,7 +1200,7 @@ public class TestCheckpoint extends Test
       CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1);
       DelayAnswer delayer = new DelayAnswer(LOG);
       Mockito.doAnswer(delayer).when(spyImage1)
-        .saveFSImageInAllDirs(Mockito.anyLong());
+        .saveFSImageInAllDirs(Mockito.<FSNamesystem>any(), Mockito.anyLong());
 
       // Set up a thread to do a checkpoint from the first 2NN
       DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Mon Feb 27 04:54:33 2012
@@ -48,7 +48,7 @@ public class TestClusterId {
     // see if cluster id not empty.
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
     Collection<URI> editsToFormat = new ArrayList<URI>(0);
-    FSImage fsImage = new FSImage(config, null, dirsToFormat, editsToFormat);
+    FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
     
     Iterator<StorageDirectory> sdit = 
       fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Mon Feb 27 04:54:33 2012
@@ -36,6 +36,11 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.junit.After;
 import org.junit.Test;
 
@@ -104,21 +109,24 @@ public class TestDeadDatanode {
 
     DatanodeProtocol dnp = cluster.getNameNodeRpc();
     
-    Block[] blocks = new Block[] { new Block(0) };
-    String[] delHints = new String[] { "" };
+    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
+        new Block(0), "") };
+    StorageReceivedDeletedBlocks[] storageBlocks = { 
+        new StorageReceivedDeletedBlocks(reg.getStorageID(), blocks) };
     
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
-      dnp.blockReceived(reg, poolId, blocks, delHints);
+      dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
     }
 
     // Ensure blockReport from dead datanode is rejected with IOException
-    long[] blockReport = new long[] { 0L, 0L, 0L };
+    StorageBlockReport[] report = { new StorageBlockReport(reg.getStorageID(),
+        new long[] { 0L, 0L, 0L }) };
     try {
-      dnp.blockReport(reg, poolId, blockReport);
+      dnp.blockReport(reg, poolId, report);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
@@ -126,9 +134,11 @@ public class TestDeadDatanode {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
+    StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0,
+        0, 0) };
+    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Feb 27 04:54:33 2012
@@ -23,6 +23,9 @@ import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Arrays;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -37,6 +40,7 @@ import org.apache.hadoop.fs.ChecksumExce
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -54,6 +58,7 @@ import org.aspectj.util.FileUtil;
 import org.mockito.Mockito;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 import static org.apache.hadoop.test.MetricsAsserts.*;
@@ -76,7 +81,7 @@ public class TestEditLog extends TestCas
   static final int NUM_TRANSACTIONS = 100;
   static final int NUM_THREADS = 100;
   
-  private static final File TEST_DIR = new File(
+  static final File TEST_DIR = new File(
     System.getProperty("test.build.data","build/test/data"));
 
   /** An edits log with 3 edits from 0.20 - the result of
@@ -631,13 +636,23 @@ public class TestEditLog extends TestCas
   }
   
   public void testCrashRecoveryEmptyLogOneDir() throws Exception {
-    doTestCrashRecoveryEmptyLog(false);
+    doTestCrashRecoveryEmptyLog(false, true);
   }
   
   public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
-    doTestCrashRecoveryEmptyLog(true);
+    doTestCrashRecoveryEmptyLog(true, true);
+  }
+
+  public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() 
+      throws Exception {
+    doTestCrashRecoveryEmptyLog(false, false);
   }
   
+  public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
+      throws Exception {
+    doTestCrashRecoveryEmptyLog(true, false);
+  }
+
   /**
    * Test that the NN handles the corruption properly
    * after it crashes just after creating an edit log
@@ -650,8 +665,14 @@ public class TestEditLog extends TestCas
    * will only be in one of the directories. In both cases, the
    * NN should fail to start up, because it's aware that txid 3
    * was reached, but unable to find a non-corrupt log starting there.
+   * @param updateTransactionIdFile if true update the seen_txid file.
+   * If false, the it will not be updated. This will simulate a case 
+   * where the NN crashed between creating the new segment and updating
+   * seen_txid. 
    */
-  private void doTestCrashRecoveryEmptyLog(boolean inBothDirs) throws Exception {
+  private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, 
+                                           boolean updateTransactionIdFile) 
+      throws Exception {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -669,6 +690,14 @@ public class TestEditLog extends TestCas
       // Make a truncated edits_3_inprogress
       File log = new File(currentDir,
           NNStorage.getInProgressEditsFileName(3));
+      NNStorage storage = new NNStorage(conf, 
+                                        Collections.<URI>emptyList(),
+                                        Lists.newArrayList(uri));
+      if (updateTransactionIdFile) {
+        storage.writeTransactionIdFileToStorage(3);
+      }
+      storage.close();
+
       new EditLogFileOutputStream(log, 1024).create();
       if (!inBothDirs) {
         break;
@@ -679,9 +708,9 @@ public class TestEditLog extends TestCas
       cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_DATA_NODES).format(false).build();
       fail("Did not fail to start with all-corrupt logs");
-    } catch (IllegalStateException ise) {
+    } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
-          "No non-corrupt logs for txid 3", ise);
+          "No non-corrupt logs for txid 3", ioe);
     }
     cluster.shutdown();
   }
@@ -706,8 +735,18 @@ public class TestEditLog extends TestCas
             
       reader = new FSEditLogOp.Reader(in, version);
     }
+  
+    @Override
+    public long getFirstTxId() throws IOException {
+      return HdfsConstants.INVALID_TXID;
+    }
     
     @Override
+    public long getLastTxId() throws IOException {
+      return HdfsConstants.INVALID_TXID;
+    }
+  
+    @Override
     public long length() throws IOException {
       return len;
     }
@@ -856,6 +895,168 @@ public class TestEditLog extends TestCas
     Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
     return storage;
   }
-  
-  
+
+  /** 
+   * Specification for a failure during #setupEdits
+   */
+  static class AbortSpec {
+    final int roll;
+    final int logindex;
+    
+    /**
+     * Construct the failure specification. 
+     * @param roll the roll number to fail after, e.g. 1 to fail after the first roll
+     * @param logindex index of the journal to fail. 
+     */
+    AbortSpec(int roll, int logindex) {
+      this.roll = roll;
+      this.logindex = logindex;
+    }
+  }
+
+  final static int TXNS_PER_ROLL = 10;  
+  final static int TXNS_PER_FAIL = 2;
+    
+  /**
+   * Set up directories for tests. 
+   *
+   * Each rolled file is 10 txns long. 
+   * A failed file is 2 txns long.
+   * 
+   * @param editUris directories to create edit logs in
+   * @param numrolls number of times to roll the edit log during setup
+   * @param abortAtRolls Specifications for when to fail, see AbortSpec
+   */
+  public static NNStorage setupEdits(List<URI> editUris, int numrolls, 
+                                     AbortSpec... abortAtRolls)
+      throws IOException {
+    List<AbortSpec> aborts = new ArrayList<AbortSpec>(Arrays.asList(abortAtRolls));
+    NNStorage storage = new NNStorage(new Configuration(),
+                                      Collections.<URI>emptyList(),
+                                      editUris);
+    storage.format("test-cluster-id");
+    FSEditLog editlog = new FSEditLog(storage);    
+    // open the edit log and add two transactions
+    // logGenerationStamp is used, simply because it doesn't 
+    // require complex arguments.
+    editlog.open();
+    for (int i = 2; i < TXNS_PER_ROLL; i++) {
+      editlog.logGenerationStamp((long)0);
+    }
+    editlog.logSync();
+    
+    // Go into edit log rolling loop.
+    // On each roll, the abortAtRolls abort specs are 
+    // checked to see if an abort is required. If so, the
+    // specified journal is aborted. It will be brought
+    // back into rotation automatically by rollEditLog
+    for (int i = 0; i < numrolls; i++) {
+      editlog.rollEditLog();
+      
+      editlog.logGenerationStamp((long)i);
+      editlog.logSync();
+
+      while (aborts.size() > 0 
+             && aborts.get(0).roll == (i+1)) {
+        AbortSpec spec = aborts.remove(0);
+        editlog.getJournals().get(spec.logindex).abort();
+      } 
+      
+      for (int j = 3; j < TXNS_PER_ROLL; j++) {
+        editlog.logGenerationStamp((long)i);
+      }
+      editlog.logSync();
+    }
+    editlog.close();
+
+    FSImageTestUtil.logStorageContents(LOG, storage);
+    return storage;
+  }
+
+  /** 
+   * Test loading an editlog whose storage directories have both
+   * failed on alternating rolls. Two edit log directories are created.
+   * The first one fails on odd rolls, the second on even. Test
+   * that we are able to load the entire editlog regardless.
+   */
+  @Test
+  public void testAlternatingJournalFailure() throws IOException {
+    File f1 = new File(TEST_DIR + "/alternatingjournaltest0");
+    File f2 = new File(TEST_DIR + "/alternatingjournaltest1");
+
+    List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
+    
+    NNStorage storage = setupEdits(editUris, 10,
+                                   new AbortSpec(1, 0),
+                                   new AbortSpec(2, 1),
+                                   new AbortSpec(3, 0),
+                                   new AbortSpec(4, 1),
+                                   new AbortSpec(5, 0),
+                                   new AbortSpec(6, 1),
+                                   new AbortSpec(7, 0),
+                                   new AbortSpec(8, 1),
+                                   new AbortSpec(9, 0),
+                                   new AbortSpec(10, 1));
+    long totaltxnread = 0;
+    FSEditLog editlog = new FSEditLog(storage);
+    long startTxId = 1;
+    Iterable<EditLogInputStream> editStreams = editlog.selectInputStreams(startTxId, 
+                                                                          TXNS_PER_ROLL*11);
+
+    for (EditLogInputStream edits : editStreams) {
+      FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
+      long read = val.getNumTransactions();
+      LOG.info("Loading edits " + edits + " read " + read);
+      assertEquals(startTxId, val.getStartTxId());
+      startTxId += read;
+      totaltxnread += read;
+    }
+
+    editlog.close();
+    storage.close();
+    assertEquals(TXNS_PER_ROLL*11, totaltxnread);    
+  }
+
+  /** 
+   * Test loading an editlog with gaps. A single editlog directory
+   * is set up. One of the edit log files is deleted. This should
+   * fail when selecting the input streams as it will not be able 
+   * to select enough streams to load up to 4*TXNS_PER_ROLL.
+   * There should be 4*TXNS_PER_ROLL transactions in total, since the
+   * initial segment plus three rolls yields four log files. 
+   */
+  @Test
+  public void testLoadingWithGaps() throws IOException {
+    File f1 = new File(TEST_DIR + "/gaptest0");
+    List<URI> editUris = ImmutableList.of(f1.toURI());
+
+    NNStorage storage = setupEdits(editUris, 3);
+    
+    final long startGapTxId = 1*TXNS_PER_ROLL + 1;
+    final long endGapTxId = 2*TXNS_PER_ROLL;
+
+    File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, 
+                                  endGapTxId))) {
+            return true;
+          }
+          return false;
+        }
+      });
+    assertEquals(1, files.length);
+    assertTrue(files[0].delete());
+    
+    FSEditLog editlog = new FSEditLog(storage);
+    long startTxId = 1;
+    try {
+      Iterable<EditLogInputStream> editStreams 
+        = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
+      
+      fail("Should have thrown exception");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "No non-corrupt logs for txid " + startGapTxId, ioe);
+    }
+  }
 }
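
To make the new seen_txid bookkeeping concrete, here is a minimal standalone
sketch, assuming the NNStorage calls behave exactly as in the hunks above; the
class name and the /tmp path are placeholders, and the sketch sits in the
namenode package in case any of these members are package-private:

    package org.apache.hadoop.hdfs.server.namenode; // same package as the tests

    import java.io.File;
    import java.io.IOException;
    import java.net.URI;
    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;
    import com.google.common.collect.Lists;

    public class SeenTxIdSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        URI editsUri = new File("/tmp/edits-dir").toURI(); // placeholder path
        NNStorage storage = new NNStorage(conf,
                                          Collections.<URI>emptyList(), // no image dirs
                                          Lists.newArrayList(editsUri));
        storage.format("test-cluster-id");          // as setupEdits does above
        // Persist seen_txid = 3. A crash after creating edits_3_inprogress but
        // before this call leaves the old seen_txid on disk -- the situation
        // the new *NoUpdateSeenTxId test variants simulate.
        storage.writeTransactionIdFileToStorage(3);
        storage.close();
      }
    }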

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Mon Feb 27 04:54:33 2012
@@ -63,8 +63,8 @@ public class TestEditLogFileOutputStream
 
     EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog);
     assertEquals("Edit log should contain a header as valid length",
-        HEADER_LEN, validation.validLength);
-    assertEquals(1, validation.numTransactions);
+        HEADER_LEN, validation.getValidLength());
+    assertEquals(1, validation.getNumTransactions());
     assertEquals("Edit log should have 1MB of bytes allocated",
         1024*1024, editLog.length());
     
@@ -72,12 +72,12 @@ public class TestEditLogFileOutputStream
     cluster.getFileSystem().mkdirs(new Path("/tmp"),
         new FsPermission((short)777));
 
-    long oldLength = validation.validLength;
+    long oldLength = validation.getValidLength();
     validation = EditLogFileInputStream.validateEditLog(editLog);
     assertTrue("Edit log should have more valid data after writing a txn " +
-        "(was: " + oldLength + " now: " + validation.validLength + ")",
-        validation.validLength > oldLength);
-    assertEquals(2, validation.numTransactions);
+        "(was: " + oldLength + " now: " + validation.getValidLength() + ")",
+        validation.getValidLength() > oldLength);
+    assertEquals(2, validation.getNumTransactions());
 
     assertEquals("Edit log should be 1MB long",
         1024 * 1024, editLog.length());
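
The hunks above migrate from EditLogValidation's public fields to accessors. A
minimal sketch of the getter-based API as these tests exercise it; the class
name and log path are placeholders:

    package org.apache.hadoop.hdfs.server.namenode; // same package as the tests

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;

    public class ValidationSketch {
      public static void main(String[] args) throws IOException {
        File editLog = new File("/tmp/edits_inprogress_0000000000000000001"); // placeholder
        EditLogValidation v = EditLogFileInputStream.validateEditLog(editLog);
        System.out.println("valid bytes: " + v.getValidLength());     // bytes up to last good txn
        System.out.println("valid txns:  " + v.getNumTransactions()); // txns successfully parsed
      }
    }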

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Mon Feb 27 04:54:33 2012
@@ -350,7 +350,7 @@ public class TestEditLogRace {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
-    final FSNamesystem namesystem = new FSNamesystem(conf);
+    final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
 
     try {
       FSImage fsimage = namesystem.getFSImage();
@@ -448,7 +448,7 @@ public class TestEditLogRace {
     Configuration conf = getConf();
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
-    final FSNamesystem namesystem = new FSNamesystem(conf);
+    final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
 
     try {
       FSImage fsimage = namesystem.getFSImage();
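
Both hunks above swap direct construction of FSNamesystem for the loadFromDisk
factory, which constructs the namesystem and loads the persisted image and
edits in one step. A hedged sketch of the pattern as these tests now use it;
the tests also initialize NameNode metrics first, which is elided here, and
the class name is a placeholder:

    package org.apache.hadoop.hdfs.server.namenode; // same package as the tests

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class LoadFromDiskSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new HdfsConfiguration();
        DFSTestUtil.formatNameNode(conf);  // format the name dirs first
        // Factory replaces `new FSNamesystem(conf)`: it also reads the
        // on-disk image and edit logs from the configured directories.
        FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
        FSImage fsimage = namesystem.getFSImage();
        System.out.println("Loaded image: " + fsimage);
      }
    }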

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Mon Feb 27 04:54:33 2012
@@ -189,8 +189,8 @@ public class TestFSEditLogLoader {
     // Make sure that uncorrupted log has the expected length and number
     // of transactions.
     EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
-    assertEquals(NUM_TXNS + 2, validation.numTransactions);
-    assertEquals(validLength, validation.validLength);
+    assertEquals(NUM_TXNS + 2, validation.getNumTransactions());
+    assertEquals(validLength, validation.getValidLength());
     
     // Back up the uncorrupted log
     File logFileBak = new File(testDir, logFile.getName() + ".bak");
@@ -206,8 +206,8 @@ public class TestFSEditLogLoader {
       truncateFile(logFile, txOffset);
       validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when truncating to length " + txOffset,
-          txid - 1, validation.numTransactions);
-      assertEquals(txOffset, validation.validLength);
+          txid - 1, validation.getNumTransactions());
+      assertEquals(txOffset, validation.getValidLength());
 
       // Restore backup, truncate the file with one byte in the txn,
       // also isn't valid
@@ -215,24 +215,24 @@ public class TestFSEditLogLoader {
       truncateFile(logFile, txOffset + 1);
       validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when truncating to length " + (txOffset + 1),
-          txid - 1, validation.numTransactions);
-      assertEquals(txOffset, validation.validLength);
+          txid - 1, validation.getNumTransactions());
+      assertEquals(txOffset, validation.getValidLength());
 
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset);
       validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when corrupting txn opcode at " + txOffset,
-          txid - 1, validation.numTransactions);
-      assertEquals(txOffset, validation.validLength);
+          txid - 1, validation.getNumTransactions());
+      assertEquals(txOffset, validation.getValidLength());
 
       // Restore backup, corrupt a byte a few bytes into the txn
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset+5);
       validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when corrupting txn data at " + (txOffset+5),
-          txid - 1, validation.numTransactions);
-      assertEquals(txOffset, validation.validLength);
+          txid - 1, validation.getNumTransactions());
+      assertEquals(txOffset, validation.getValidLength());
     }
     
     // Corrupt the log at every offset to make sure that validation itself
@@ -243,8 +243,8 @@ public class TestFSEditLogLoader {
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, offset);
       EditLogValidation val = EditLogFileInputStream.validateEditLog(logFile);
-      assertTrue(val.numTransactions >= prevNumValid);
-      prevNumValid = val.numTransactions;
+      assertTrue(val.getNumTransactions() >= prevNumValid);
+      prevNumValid = val.getNumTransactions();
     }
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Mon Feb 27 04:54:33 2012
@@ -36,9 +36,6 @@ import static org.apache.hadoop.hdfs.ser
 
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.TransactionalLoadPlan;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.LogGroup;
-import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -63,335 +60,14 @@ public class TestFSImageStorageInspector
         "/foo/current/" + getInProgressEditsFileName(457));
 
     inspector.inspectDirectory(mockDir);
-    mockLogValidation(inspector,
-        "/foo/current/" + getInProgressEditsFileName(457), 10);
-    
-    assertEquals(2, inspector.foundEditLogs.size());
     assertEquals(2, inspector.foundImages.size());
-    assertTrue(inspector.foundEditLogs.get(1).isInProgress());
-    
+
     FSImageFile latestImage = inspector.getLatestImage();
     assertEquals(456, latestImage.txId);
     assertSame(mockDir, latestImage.sd);
     assertTrue(inspector.isUpgradeFinalized());
     
-    LoadPlan plan = inspector.createLoadPlan();
-    LOG.info("Plan: " + plan);
-    
     assertEquals(new File("/foo/current/"+getImageFileName(456)), 
-                 plan.getImageFile());
-    assertArrayEquals(new File[] {
-        new File("/foo/current/" + getInProgressEditsFileName(457)) },
-        plan.getEditsFiles().toArray(new File[0]));
-  }
-  
-  /**
-   * Test that we check for gaps in txids when devising a load plan.
-   */
-  @Test
-  public void testPlanWithGaps() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    
-    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.IMAGE_AND_EDITS,
-        false,
-        "/foo/current/" + getImageFileName(123),
-        "/foo/current/" + getImageFileName(456),
-        "/foo/current/" + getFinalizedEditsFileName(457,900),
-        "/foo/current/" + getFinalizedEditsFileName(901,950),
-        "/foo/current/" + getFinalizedEditsFileName(952,1000)); // <-- missing edit 951!
-
-    inspector.inspectDirectory(mockDir);
-    try {
-      inspector.createLoadPlan();
-      fail("Didn't throw IOE trying to load with gaps in edits");
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().contains(
-          "would start at txid 951 but starts at txid 952"));
-    }
-  }
-  
-  /**
-   * Test the case where an in-progress log comes in the middle of a sequence
-   * of logs
-   */
-  @Test
-  public void testPlanWithInProgressInMiddle() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    
-    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.IMAGE_AND_EDITS,
-        false,
-        "/foo/current/" + getImageFileName(123),
-        "/foo/current/" + getImageFileName(456),
-        "/foo/current/" + getFinalizedEditsFileName(457,900),
-        "/foo/current/" + getInProgressEditsFileName(901), // <-- inprogress in middle
-        "/foo/current/" + getFinalizedEditsFileName(952,1000));
-
-    inspector.inspectDirectory(mockDir);
-    mockLogValidation(inspector,
-        "/foo/current/" + getInProgressEditsFileName(901), 51);
-
-    LoadPlan plan = inspector.createLoadPlan();
-    LOG.info("Plan: " + plan);
-    
-    assertEquals(new File("/foo/current/" + getImageFileName(456)), 
-                 plan.getImageFile());
-    assertArrayEquals(new File[] {
-        new File("/foo/current/" + getFinalizedEditsFileName(457,900)),
-        new File("/foo/current/" + getInProgressEditsFileName(901)),
-        new File("/foo/current/" + getFinalizedEditsFileName(952,1000)) },
-        plan.getEditsFiles().toArray(new File[0]));
-
-  }
-
-  
-  /**
-   * Test case for the usual case where no recovery of a log group is necessary
-   * (i.e all logs have the same start and end txids and finalized)
-   */
-  @Test
-  public void testLogGroupRecoveryNoop() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo1/current/" 
-                                  + getFinalizedEditsFileName(123,456)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo2/current/"
-                                  + getFinalizedEditsFileName(123,456)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo3/current/"
-                                  + getFinalizedEditsFileName(123,456)));
-    LogGroup lg = inspector.logGroups.get(123L);
-    assertEquals(3, lg.logs.size());
-    
-    lg.planRecovery();
-    
-    assertFalse(lg.logs.get(0).isCorrupt());
-    assertFalse(lg.logs.get(1).isCorrupt());
-    assertFalse(lg.logs.get(2).isCorrupt());
-  }
-  
-  /**
-   * Test case where we have some in-progress and some finalized logs
-   * for a given txid.
-   */
-  @Test
-  public void testLogGroupRecoveryMixed() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo1/current/"
-                                  + getFinalizedEditsFileName(123,456)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo2/current/"
-                                  + getFinalizedEditsFileName(123,456)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo3/current/"
-                                  + getInProgressEditsFileName(123)));
-    inspector.inspectDirectory(FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.IMAGE,
-        false,
-        "/foo4/current/" + getImageFileName(122)));
-
-    LogGroup lg = inspector.logGroups.get(123L);
-    assertEquals(3, lg.logs.size());
-    EditLogFile inProgressLog = lg.logs.get(2);
-    assertTrue(inProgressLog.isInProgress());
-    
-    LoadPlan plan = inspector.createLoadPlan();
-
-    // Check that it was marked corrupt.
-    assertFalse(lg.logs.get(0).isCorrupt());
-    assertFalse(lg.logs.get(1).isCorrupt());
-    assertTrue(lg.logs.get(2).isCorrupt());
-
-    
-    // Calling recover should move it aside
-    inProgressLog = spy(inProgressLog);
-    Mockito.doNothing().when(inProgressLog).moveAsideCorruptFile();
-    lg.logs.set(2, inProgressLog);
-    
-    plan.doRecovery();
-    
-    Mockito.verify(inProgressLog).moveAsideCorruptFile();
-  }
-  
-  /**
-   * Test case where we have finalized logs with different end txids
-   */
-  @Test
-  public void testLogGroupRecoveryInconsistentEndTxIds() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo1/current/"
-                                  + getFinalizedEditsFileName(123,456)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo2/current/"
-                                  + getFinalizedEditsFileName(123,678)));
-
-    LogGroup lg = inspector.logGroups.get(123L);
-    assertEquals(2, lg.logs.size());
-
-    try {
-      lg.planRecovery();
-      fail("Didn't throw IOE on inconsistent end txids");
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().contains("More than one ending txid"));
-    }
-  }
-
-  /**
-   * Test case where we have only in-progress logs and need to synchronize
-   * based on valid length.
-   */
-  @Test
-  public void testLogGroupRecoveryInProgress() throws IOException {
-    String paths[] = new String[] {
-        "/foo1/current/" + getInProgressEditsFileName(123),
-        "/foo2/current/" + getInProgressEditsFileName(123),
-        "/foo3/current/" + getInProgressEditsFileName(123)
-    };
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[0]));
-    inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[1]));
-    inspector.inspectDirectory(mockDirectoryWithEditLogs(paths[2]));
-
-    // Inject spies to return the valid counts we would like to see
-    mockLogValidation(inspector, paths[0], 2000);
-    mockLogValidation(inspector, paths[1], 2000);
-    mockLogValidation(inspector, paths[2], 1000);
-
-    LogGroup lg = inspector.logGroups.get(123L);
-    assertEquals(3, lg.logs.size());
-    
-    lg.planRecovery();
-    
-    // Check that the short one was marked corrupt
-    assertFalse(lg.logs.get(0).isCorrupt());
-    assertFalse(lg.logs.get(1).isCorrupt());
-    assertTrue(lg.logs.get(2).isCorrupt());
-    
-    // Calling recover should move it aside
-    EditLogFile badLog = lg.logs.get(2);
-    Mockito.doNothing().when(badLog).moveAsideCorruptFile();
-    Mockito.doNothing().when(lg.logs.get(0)).finalizeLog();
-    Mockito.doNothing().when(lg.logs.get(1)).finalizeLog();
-    
-    lg.recover();
-    
-    Mockito.verify(badLog).moveAsideCorruptFile();
-    Mockito.verify(lg.logs.get(0)).finalizeLog();
-    Mockito.verify(lg.logs.get(1)).finalizeLog();
-  }
-
-  /**
-   * Mock out the log at the given path to return a specified number
-   * of transactions upon validation.
-   */
-  private void mockLogValidation(
-      FSImageTransactionalStorageInspector inspector,
-      String path, int numValidTransactions) throws IOException {
-    
-    for (LogGroup lg : inspector.logGroups.values()) {
-      List<EditLogFile> logs = lg.logs;
-      for (int i = 0; i < logs.size(); i++) {
-        EditLogFile log = logs.get(i);
-        if (log.getFile().getPath().equals(path)) {
-          // mock out its validation
-          EditLogFile spyLog = spy(log);
-          doReturn(new FSEditLogLoader.EditLogValidation(-1, numValidTransactions))
-            .when(spyLog).validateLog();
-          logs.set(i, spyLog);
-          return;
-        }
-      }
-    }
-    fail("No log found to mock out at " + path);
-  }
-
-  /**
-   * Test when edits and image are in separate directories.
-   */
-  @Test
-  public void testCurrentSplitEditsAndImage() throws IOException {
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    
-    StorageDirectory mockImageDir = FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.IMAGE,
-        false,
-        "/foo/current/" + getImageFileName(123));
-    StorageDirectory mockImageDir2 = FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.IMAGE,
-        false,
-        "/foo2/current/" + getImageFileName(456));
-    StorageDirectory mockEditsDir = FSImageTestUtil.mockStorageDirectory(
-        NameNodeDirType.EDITS,
-        false,
-        "/foo3/current/" + getFinalizedEditsFileName(123, 456),
-        "/foo3/current/" + getInProgressEditsFileName(457));
-    
-    inspector.inspectDirectory(mockImageDir);
-    inspector.inspectDirectory(mockEditsDir);
-    inspector.inspectDirectory(mockImageDir2);
-    
-    mockLogValidation(inspector,
-        "/foo3/current/" + getInProgressEditsFileName(457), 2);
-
-    assertEquals(2, inspector.foundEditLogs.size());
-    assertEquals(2, inspector.foundImages.size());
-    assertTrue(inspector.foundEditLogs.get(1).isInProgress());
-    assertTrue(inspector.isUpgradeFinalized());    
-
-    // Check plan
-    TransactionalLoadPlan plan =
-      (TransactionalLoadPlan)inspector.createLoadPlan();
-    FSImageFile pickedImage = plan.image;
-    assertEquals(456, pickedImage.txId);
-    assertSame(mockImageDir2, pickedImage.sd);
-    assertEquals(new File("/foo2/current/" + getImageFileName(456)),
-                 plan.getImageFile());
-    assertArrayEquals(new File[] {
-        new File("/foo3/current/" + getInProgressEditsFileName(457))
-      }, plan.getEditsFiles().toArray(new File[0]));
-  }
-  
-  /**
-   * Test case where an in-progress log is in an earlier name directory
-   * than a finalized log. Previously, getEditLogManifest wouldn't
-   * see this log.
-   */
-  @Test
-  public void testLogManifestInProgressComesFirst() throws IOException { 
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo1/current/" 
-                                  + getFinalizedEditsFileName(2622,2623),
-                                  "/foo1/current/"
-                                  + getFinalizedEditsFileName(2624,2625),
-                                  "/foo1/current/"
-                                  + getInProgressEditsFileName(2626)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo2/current/"
-                                  + getFinalizedEditsFileName(2622,2623),
-                                  "/foo2/current/"
-                                  + getFinalizedEditsFileName(2624,2625),
-                                  "/foo2/current/"
-                                  + getFinalizedEditsFileName(2626,2627),
-                                  "/foo2/current/"
-                                  + getFinalizedEditsFileName(2628,2629)));
-  }  
-  
-  static StorageDirectory mockDirectoryWithEditLogs(String... fileNames) {
-    return FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, fileNames);
+        latestImage.getFile());
   }
 }
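
After the cleanup above, the surviving test reduces to: inspect each storage
directory, then ask for the newest image. A minimal sketch of that flow,
assuming the inspectDirectory/getLatestImage API and the
FSImageTestUtil.mockStorageDirectory helper used in this file; the class name
and /foo paths are placeholders:

    package org.apache.hadoop.hdfs.server.namenode; // same package as the tests

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
    import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
    import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

    public class InspectorSketch {
      static FSImageFile latestImage() throws IOException {
        FSImageTransactionalStorageInspector inspector =
            new FSImageTransactionalStorageInspector();
        // A mocked directory holding fsimage_123 and fsimage_456.
        StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
            NameNodeDirType.IMAGE, false,
            "/foo/current/" + NNStorage.getImageFileName(123),
            "/foo/current/" + NNStorage.getImageFileName(456));
        inspector.inspectDirectory(mockDir);
        return inspector.getLatestImage(); // the image with the highest txid, 456
      }
    }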


