hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1346682 [6/9] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs/ hadoop-hdfs/dev-support/ hadoop-hdfs/src/contrib/bkjournal/ ha...
Date Wed, 06 Jun 2012 00:18:04 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Jun  6 00:17:38 2012
@@ -101,18 +101,18 @@ public class TestGetBlocks extends TestC
       BlockWithLocations[] locs;
       locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
       assertEquals(locs.length, 2);
-      assertEquals(locs[0].getDatanodes().length, 2);
-      assertEquals(locs[1].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
+      assertEquals(locs[1].getStorageIDs().length, 2);
 
       // get blocks of size BlockSize from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 1 from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 0 from dataNodes[0]
       getBlocksWithException(namenode, dataNodes[0], 0);     

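This hunk tracks the rename of BlockWithLocations#getDatanodes() to getStorageIDs(): the block lists returned by NamenodeProtocol#getBlocks now identify replicas by storage ID strings rather than datanode objects. A minimal sketch of the assertion pattern, assuming getStorageIDs() returns a String[] as the updated assertions imply; the BlockWithStorageIDs class below is a hypothetical stand-in, not the real HDFS type:

    import java.util.Arrays;

    public class StorageIdCheck {
      // Hypothetical stand-in for BlockWithLocations after the rename;
      // the real class pairs a block with the storage IDs of its replicas.
      static class BlockWithStorageIDs {
        private final String[] storageIDs;
        BlockWithStorageIDs(String... storageIDs) { this.storageIDs = storageIDs; }
        String[] getStorageIDs() { return storageIDs; }
      }

      public static void main(String[] args) {
        BlockWithStorageIDs loc = new BlockWithStorageIDs("DS-1", "DS-2");
        // Replication factor 2 means two storage IDs per returned block.
        assert loc.getStorageIDs().length == 2
            : "expected 2 replicas, got " + Arrays.toString(loc.getStorageIDs());
      }
    }

(Run with java -ea to enable the assertion.)
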
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Wed Jun  6 00:17:38 2012
@@ -875,8 +875,8 @@ public class TestQuota {
       // 6kb block
       // 192kb quota
       final int FILE_SIZE = 1024;
-      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize();
-      assertEquals(6 * 1024, fs.getDefaultBlockSize());
+      final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
+      assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
       assertEquals(192 * 1024, QUOTA_SIZE);
 
       // Create the dir and set the quota. We need to enable the quota before
@@ -903,7 +903,7 @@ public class TestQuota {
       assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
           c.getSpaceConsumed());
       assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
-          3 * (fs.getDefaultBlockSize() - FILE_SIZE));
+          3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
 
       // Now check that trying to create another file violates the quota
       try {

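The TestQuota hunk switches from the zero-argument fs.getDefaultBlockSize() to the Path-qualified overload, so the default block size is resolved by the filesystem that actually serves the given path rather than a global default. A short sketch of the overload, assuming a reachable default filesystem; the /test/quota path is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DefaultBlockSizeDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path("/test/quota"); // illustrative path
        // Path-qualified overload: asks the filesystem responsible for this
        // particular path, which matters for mount-table filesystems.
        long blockSize = fs.getDefaultBlockSize(dir);
        System.out.println("default block size for " + dir + ": " + blockSize);
      }
    }
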
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Wed Jun  6 00:17:38 2012
@@ -38,8 +38,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests that data nodes are correctly replaced on failure.
  */
 public class TestReplaceDatanodeOnFailure {
   static final Log LOG = AppendTestUtil.LOG;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Jun  6 00:17:38 2012
@@ -161,7 +161,7 @@ public class TestPBHelper {
 
   private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
     assertEquals(locs1.getBlock(), locs2.getBlock());
-    assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
+    assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
   }
 
   @Test

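The compare() helper is the usual round-trip test pattern: convert to the protobuf wire type and back, then assert field-by-field equality, using Arrays.equals for the storage ID arrays since == would only compare references. A self-contained sketch of the pattern, where roundTrip() is a placeholder for the PBHelper convert calls under test:

    import static org.junit.Assert.assertTrue;

    import java.util.Arrays;

    import org.junit.Test;

    public class RoundTripCompareSketch {
      // Placeholder for a PB convert-and-convert-back cycle.
      private String[] roundTrip(String[] storageIDs) {
        return storageIDs.clone();
      }

      @Test
      public void testStorageIdsSurviveRoundTrip() {
        String[] before = { "DS-1", "DS-2" };
        String[] after = roundTrip(before);
        // Element-wise comparison; reference equality would fail here.
        assertTrue(Arrays.equals(before, after));
      }
    }
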
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Jun  6 00:17:38 2012
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import static org.junit.Assert.*;
 import java.io.File;
 import java.io.IOException;
+import java.util.Collection;
 
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -32,11 +35,14 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.junit.Test;
 
 public class TestOverReplicatedBlocks {
@@ -116,6 +122,77 @@ public class TestOverReplicatedBlocks {
       cluster.shutdown();
     }
   }
+
+  static final long SMALL_BLOCK_SIZE =
+    DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+  static final long SMALL_FILE_LENGTH = SMALL_BLOCK_SIZE * 4;
+
+  /**
+   * The test verifies that the replica chosen for deletion is on the node
+   * with the oldest heartbeat, when that heartbeat is older than the
+   * tolerable heartbeat interval.
+   * It creates a file with several blocks and replication 4.
+   * The last DN is configured to send heartbeats rarely.
+   * 
+   * The test waits until the tolerable heartbeat interval expires, then
+   * reduces the replication of the file. All replica deletions should be
+   * scheduled for the last node. No replicas will actually be deleted,
+   * since the last DN doesn't send heartbeats.
+   */
+  @Test
+  public void testChooseReplicaToDelete() throws IOException {
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      fs = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+
+      conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
+      cluster.startDataNodes(conf, 1, true, null, null, null);
+      DataNode lastDN = cluster.getDataNodes().get(3);
+      DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
+          lastDN, namesystem.getBlockPoolId());
+      String lastDNid = dnReg.getStorageID();
+
+      final Path fileName = new Path("/foo2");
+      DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short)4);
+
+      // Wait for tolerable number of heartbeats plus one
+      DatanodeDescriptor nodeInfo = null;
+      long lastHeartbeat = 0;
+      long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
+        (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
+      do {
+        nodeInfo = 
+          namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
+        lastHeartbeat = nodeInfo.getLastUpdate();
+      } while(now() - lastHeartbeat < waitTime);
+      fs.setReplication(fileName, (short)3);
+
+      BlockLocation locs[] = fs.getFileBlockLocations(
+          fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+
+      // All replicas for deletion should be scheduled on lastDN.
+      // And should not actually be deleted, because lastDN does not heartbeat.
+      namesystem.readLock();
+      Collection<Block> dnBlocks = 
+        namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
+      assertEquals("Replicas on node " + lastDNid + " should have been deleted",
+          SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
+      namesystem.readUnlock();
+      for(BlockLocation location : locs)
+        assertEquals("Block should still have 4 replicas",
+            4, location.getNames().length);
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
+
   /**
    * Test over replicated block should get invalidated when decreasing the
    * replication for a partial block.

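The wait loop in testChooseReplicaToDelete spins until the last heartbeat is older than the tolerable window, computed as heartbeat interval * 1000 * (tolerance multiplier + 1) milliseconds. A worked check of that arithmetic, assuming the stock defaults of a 3 second interval and a multiplier of 4:

    public class StaleHeartbeatWindow {
      public static void main(String[] args) {
        long heartbeatIntervalSec = 3; // assumed DFS_HEARTBEAT_INTERVAL_DEFAULT
        int toleranceMultiplier = 4;   // assumed tolerate-heartbeat multiplier default
        // Waiting one interval beyond the tolerated window guarantees the
        // node looks stale to the replica-deletion chooser.
        long waitTimeMs = heartbeatIntervalSec * 1000 * (toleranceMultiplier + 1);
        System.out.println("wait at least " + waitTimeMs + " ms"); // 15000 ms
      }
    }
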
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Wed Jun  6 00:17:38 2012
@@ -114,7 +114,7 @@ public class TestJspHelper {
     UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
     Token<? extends TokenIdentifier> tokenInUgi = ugi.getTokens().iterator()
         .next();
-    Assert.assertEquals(tokenInUgi.getService().toString(), expected);
+    Assert.assertEquals(expected, tokenInUgi.getService().toString());
   }
   
   

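The one-line TestJspHelper fix is about diagnostics rather than behavior: JUnit's assertEquals takes (expected, actual), and swapping the arguments produces failure messages that report the two values backwards. For example, with computeService() as a made-up stand-in for the token service lookup:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class AssertOrderDemo {
      private String computeService() {
        return "hdfs://nn:8020"; // stand-in for the value under test
      }

      @Test
      public void testArgumentOrder() {
        String expected = "hdfs://nn:8020";
        // Correct order: a failure reads "expected:<hdfs://nn:8020> but was:<...>".
        assertEquals(expected, computeService());
      }
    }
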
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Wed Jun  6 00:17:38 2012
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -115,7 +116,7 @@ public class TestBPOfferService {
             0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
     
-    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
+    Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))

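The TestBPOfferService stub now returns a registration built by the new DFSTestUtil helper instead of the removed two-argument DatanodeRegistration constructor. The doReturn(...).when(mock).method(...) form used here stubs a call without invoking anything real on the mock; a minimal sketch with a made-up protocol interface:

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    public class StubSketch {
      public interface NameNodeProtocol {
        String registerDatanode(String registration);
      }

      public static void main(String[] args) {
        NameNodeProtocol nn = mock(NameNodeProtocol.class);
        // doReturn/when stubs without calling through, which is why the test
        // can swap in a differently built registration so freely.
        doReturn("registered").when(nn).registerDatanode(anyString());
        System.out.println(nn.registerDatanode("dn-1")); // prints "registered"
      }
    }
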
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java Wed Jun  6 00:17:38 2012
@@ -101,7 +101,7 @@ public class TestBlockPoolManager {
   @Test
   public void testFederationRefresh() throws Exception {
     Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
     addNN(conf, "ns1", "mock1:8020");
     addNN(conf, "ns2", "mock1:8020");
@@ -112,7 +112,7 @@ public class TestBlockPoolManager {
     log.setLength(0);
 
     // Remove the first NS
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1");
     bpm.refreshNamenodes(conf);
     assertEquals(
@@ -122,7 +122,7 @@ public class TestBlockPoolManager {
     
     // Add back an NS -- this creates a new BPOS since the old
     // one for ns2 should have been previously retired
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
         "ns1,ns2");
     bpm.refreshNamenodes(conf);
     assertEquals(

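This hunk, like the TestDeleteBlockPool, TestMulitipleNNDataBlockScanner, and TestCheckpoint hunks below, tracks the rename of DFSConfigKeys.DFS_FEDERATION_NAMESERVICES to DFS_NAMESERVICES. A sketch of a federated configuration using the renamed key; the nameservice IDs and addresses are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class NameservicesConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "dfs.nameservices" is the property behind DFS_NAMESERVICES.
        conf.set("dfs.nameservices", "ns1,ns2");
        // One RPC address per nameservice, using the standard suffix pattern.
        conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:8020");
        conf.set("dfs.namenode.rpc-address.ns2", "nn2.example.com:8020");
        System.out.println(conf.get("dfs.nameservices"));
      }
    }
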
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Wed Jun  6 00:17:38 2012
@@ -425,7 +425,7 @@ public class TestBlockRecovery {
     DataNode spyDN = spy(dn);
     doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
        when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     verify(spyDN, never()).syncBlock(
         any(RecoveringBlock.class), anyListOf(BlockRecord.class));
@@ -445,7 +445,7 @@ public class TestBlockRecovery {
     DataNode spyDN = spy(dn);
     doThrow(new IOException()).
        when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     verify(spyDN, never()).syncBlock(
         any(RecoveringBlock.class), anyListOf(BlockRecord.class));
@@ -465,7 +465,7 @@ public class TestBlockRecovery {
     doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
         block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
         initReplicaRecovery(any(RecoveringBlock.class));
-    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
+    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
     d.join();
     DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
     verify(dnP).commitBlockSynchronization(

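These TestBlockRecovery hunks thread a NameNode identifier ("fake NN" here) into recoverBlocks, and they rely on Mockito's spy plus doThrow combination: the real object's code runs, but initReplicaRecovery is forced to fail so that syncBlock must never be reached. A minimal sketch of that combination with made-up types:

    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.never;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.verify;

    import java.io.IOException;

    public class SpyDoThrowSketch {
      public static class Recovery {
        public void initReplicaRecovery() throws IOException { /* real work */ }
        public void syncBlock() { /* must not run if init fails */ }
        public void recover() {
          try {
            initReplicaRecovery();
            syncBlock();
          } catch (IOException e) {
            // swallowed, mirroring per-block error handling in a daemon
          }
        }
      }

      public static void main(String[] args) throws IOException {
        Recovery spyR = spy(new Recovery());
        // Force the first phase to fail; the spy runs real code otherwise.
        doThrow(new IOException("recovery in progress"))
            .when(spyR).initReplicaRecovery();
        spyR.recover();
        verify(spyR, never()).syncBlock();
      }
    }
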
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Wed Jun  6 00:17:38 2012
@@ -18,16 +18,20 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLEncoder;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.jsp.JspWriter;
 
+import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -44,9 +48,10 @@ public class TestDatanodeJsp {
   
   private static final String FILE_DATA = "foo bar baz biz buz";
   private static final HdfsConfiguration CONF = new HdfsConfiguration();
+  private static String viewFilePage;
   
-  private static void testViewingFile(MiniDFSCluster cluster, String filePath,
-      boolean doTail) throws IOException {
+  private static void testViewingFile(MiniDFSCluster cluster, String filePath)
+      throws IOException {
     FileSystem fs = cluster.getFileSystem();
     
     Path testPath = new Path(filePath);
@@ -58,23 +63,46 @@ public class TestDatanodeJsp {
     InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
     int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
     
-    String jspName = doTail ? "tail.jsp" : "browseDirectory.jsp";
-    String fileParamName = doTail ? "filename" : "dir";
+    URL url = new URL("http://localhost:" + dnInfoPort + "/"
+        + "browseDirectory.jsp" + JspHelper.getUrlParam("dir", 
+            URLEncoder.encode(testPath.toString(), "UTF-8"), true)
+        + JspHelper.getUrlParam("namenodeInfoPort", Integer
+            .toString(nnHttpAddress.getPort())) + JspHelper
+            .getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));
     
-    URL url = new URL("http://localhost:" + dnInfoPort + "/" + jspName +
-        JspHelper.getUrlParam(fileParamName, URLEncoder.encode(testPath.toString(), "UTF-8"), true) +
-        JspHelper.getUrlParam("namenodeInfoPort", Integer.toString(nnHttpAddress.getPort())) + 
-        JspHelper.getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));
-    
-    String viewFilePage = DFSTestUtil.urlGet(url);
+    viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(url));
     
     assertTrue("page should show preview of file contents, got: " + viewFilePage,
         viewFilePage.contains(FILE_DATA));
     
-    if (!doTail) {
-      assertTrue("page should show link to download file", viewFilePage
-          .contains("/streamFile" + ServletUtil.encodePath(testPath.toString()) +
-              "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    assertTrue("page should show link to download file", viewFilePage
+        .contains("/streamFile" + ServletUtil.encodePath(filePath)
+            + "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    
+    // check whether able to tail the file
+    String regex = "<a.+href=\"(.+?)\">Tail\\s*this\\s*file\\<\\/a\\>";
+    assertFileContents(regex, "Tail this File");
+    
+    // check whether able to 'Go Back to File View' after tailing the file
+    regex = "<a.+href=\"(.+?)\">Go\\s*Back\\s*to\\s*File\\s*View\\<\\/a\\>";
+    assertFileContents(regex, "Go Back to File View");
+  }
+  
+  private static void assertFileContents(String regex, String text)
+      throws IOException {
+    Pattern compile = Pattern.compile(regex);
+    Matcher matcher = compile.matcher(viewFilePage);
+    URL hyperlink = null;
+    if (matcher.find()) {
+      // got hyperlink for Tail this file
+      hyperlink = new URL(matcher.group(1));
+      viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil
+          .urlGet(hyperlink));
+      assertTrue("page should show preview of file contents", viewFilePage
+          .contains(FILE_DATA));
+    } else {
+      fail(text + " hyperlink should be there in the page content : "
+          + viewFilePage);
     }
   }
   
@@ -97,8 +125,8 @@ public class TestDatanodeJsp {
         "/foo\">bar/foo\">bar"
       };
       for (String p : paths) {
-        testViewingFile(cluster, p, false);
-        testViewingFile(cluster, p, true);
+        testViewingFile(cluster, p);
+        testViewingFile(cluster, p);
       }
     } finally {
       if (cluster != null) {

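The rewritten TestDatanodeJsp now walks from the directory-browsing page to the tail view by pulling hyperlinks out of the rendered HTML with java.util.regex, where group(1) captures the href target. A standalone sketch of that extraction; the sample markup is made up:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LinkExtractSketch {
      public static void main(String[] args) {
        String page = "<p><a class=\"nav\" href=\"/tail.jsp?filename=/foo\">"
            + "Tail this file</a></p>";
        // Same shape as the test's regex: capture the href target and
        // tolerate arbitrary whitespace inside the link text.
        Pattern p = Pattern.compile("<a.+href=\"(.+?)\">Tail\\s*this\\s*file</a>");
        Matcher m = p.matcher(page);
        if (m.find()) {
          System.out.println("tail link: " + m.group(1)); // /tail.jsp?filename=/foo
        }
      }
    }
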
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Wed Jun  6 00:17:38 2012
@@ -46,7 +46,7 @@ public class TestDeleteBlockPool {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -79,7 +79,7 @@ public class TestDeleteBlockPool {
       }
 
       Configuration nn1Conf = cluster.getConfiguration(1);
-      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId2");
+      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
       dn1.refreshNamenodes(nn1Conf);
       assertEquals(1, dn1.getAllBpOs().length);
 
@@ -155,7 +155,7 @@ public class TestDeleteBlockPool {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -178,7 +178,7 @@ public class TestDeleteBlockPool {
       File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
       
       Configuration nn1Conf = cluster.getConfiguration(0);
-      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");
+      nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
       dn1.refreshNamenodes(nn1Conf);
       Assert.assertEquals(1, dn1.getAllBpOs().length);
       

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java Wed Jun  6 00:17:38 2012
@@ -105,7 +105,7 @@ public class TestMulitipleNNDataBlockSca
         namenodesBuilder.append(",");
       }
 
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
           .toString());
       DataNode dn = cluster.getDataNodes().get(0);
       dn.refreshNamenodes(conf);
@@ -122,7 +122,7 @@ public class TestMulitipleNNDataBlockSca
 
       namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
-      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
+      conf.set(DFSConfigKeys.DFS_NAMESERVICES, namenodesBuilder
           .toString());
       dn.refreshNamenodes(conf);
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Jun  6 00:17:38 2012
@@ -35,10 +35,12 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -765,6 +767,7 @@ public class NNThroughputBenchmark {
     ArrayList<Block> blocks;
     int nrBlocks; // actual number of blocks
     long[] blockReportList;
+    int dnIdx;
 
     /**
     * Return a 6 digit integer port.
@@ -780,11 +783,7 @@ public class NNThroughputBenchmark {
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      String ipAddr = DNS.getDefaultIP("default");
-      String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
-      dnRegistration.setHostName(hostName);
-      dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
+      this.dnIdx = dnIdx;
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }
@@ -800,7 +799,14 @@ public class NNThroughputBenchmark {
     void register() throws IOException {
       // get versions from the namenode
       nsInfo = nameNodeProto.versionRequest();
-      dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
+      dnRegistration = new DatanodeRegistration(
+          new DatanodeID(DNS.getDefaultIP("default"),
+              DNS.getDefaultHost("default", "default"),
+              "", getNodePort(dnIdx),
+              DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+              DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
+          new DataStorage(nsInfo, ""),
+          new ExportedBlockKeys(), VersionInfo.getVersion());
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
@@ -896,12 +902,9 @@ public class NNThroughputBenchmark {
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
-          receivedDNReg =
-            new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
-          receivedDNReg.setStorageInfo(
-            new DataStorage(nsInfo, dnInfo.getStorageID()));
-          receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-          receivedDNReg.setIpcPort(dnInfo.getIpcPort());
+          receivedDNReg = new DatanodeRegistration(dnInfo,
+            new DataStorage(nsInfo, dnInfo.getStorageID()),
+            new ExportedBlockKeys(), VersionInfo.getVersion());
           ReceivedDeletedBlockInfo[] rdBlocks = {
             new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,

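Both NNThroughputBenchmark hunks replace piecemeal mutation of a DatanodeRegistration (setHostName, setStorageInfo, setInfoPort, and so on) with one-shot construction from a DatanodeID, a DataStorage, fresh ExportedBlockKeys, and the software version string. A compile-time sketch against the constructor shape visible in the hunk; the address and ports are illustrative, and nsInfo would come from nameNodeProto.versionRequest() as in the benchmark:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.datanode.DataStorage;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
    import org.apache.hadoop.util.VersionInfo;

    public class RegistrationSketch {
      // Identity, storage, block keys, and software version are all
      // supplied up front; the storage ID is left empty to be assigned
      // later, as the benchmark does via DataNode.setNewStorageID.
      static DatanodeRegistration build(NamespaceInfo nsInfo, int xferPort) {
        DatanodeID id = new DatanodeID("127.0.0.1", "localhost",
            "" /* storage ID */, xferPort, 50075 /* http */, 50020 /* ipc */);
        return new DatanodeRegistration(id, new DataStorage(nsInfo, ""),
            new ExportedBlockKeys(), VersionInfo.getVersion());
      }
    }
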
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Jun  6 00:17:38 2012
@@ -1115,7 +1115,7 @@ public class TestCheckpoint extends Test
     Configuration conf = new HdfsConfiguration();
     String nameserviceId1 = "ns1";
     String nameserviceId2 = "ns2";
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceId1
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
         + "," + nameserviceId2);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Jun  6 00:17:38 2012
@@ -22,6 +22,7 @@ import java.io.*;
 import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -527,7 +528,7 @@ public class TestEditLog extends TestCas
     } catch (IOException e) {
       // expected
       assertEquals("Cause of exception should be ChecksumException",
-          e.getCause().getClass(), ChecksumException.class);
+          ChecksumException.class, e.getCause().getClass());
     }
   }
 
@@ -739,8 +740,9 @@ public class TestEditLog extends TestCas
         throw ioe;
       } else {
         GenericTestUtils.assertExceptionContains(
-            "No non-corrupt logs for txid 3",
-            ioe);
+          "Gap in transactions. Expected to be able to read up until " +
+          "at least txid 3 but unable to find any edit logs containing " +
+          "txid 3", ioe);
       }
     } finally {
       cluster.shutdown();
@@ -765,16 +767,16 @@ public class TestEditLog extends TestCas
       tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
       in = new DataInputStream(tracker);
             
-      reader = new FSEditLogOp.Reader(in, version);
+      reader = new FSEditLogOp.Reader(in, tracker, version);
     }
   
     @Override
-    public long getFirstTxId() throws IOException {
+    public long getFirstTxId() {
       return HdfsConstants.INVALID_TXID;
     }
     
     @Override
-    public long getLastTxId() throws IOException {
+    public long getLastTxId() {
       return HdfsConstants.INVALID_TXID;
     }
   
@@ -1103,9 +1105,9 @@ public class TestEditLog extends TestCas
 
     for (EditLogInputStream edits : editStreams) {
       FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
-      long read = val.getNumTransactions();
+      long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
       LOG.info("Loading edits " + edits + " read " + read);
-      assertEquals(startTxId, val.getStartTxId());
+      assertEquals(startTxId, edits.getFirstTxId());
       startTxId += read;
       totaltxnread += read;
     }
@@ -1153,7 +1155,9 @@ public class TestEditLog extends TestCas
       fail("Should have thrown exception");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
-          "No non-corrupt logs for txid " + startGapTxId, ioe);
+          "Gap in transactions. Expected to be able to read up until " +
+          "at least txid 40 but unable to find any edit logs containing " +
+          "txid 11", ioe);
     }
   }
 
@@ -1227,4 +1231,55 @@ public class TestEditLog extends TestCas
       validateNoCrash(garbage);
     }
   }
+
+  /**
+   * Test creating a directory with lots and lots of edit log segments
+   */
+  @Test
+  public void testManyEditLogSegments() throws IOException {
+    final int NUM_EDIT_LOG_ROLLS = 1000;
+    // start a cluster
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      FSImage fsimage = namesystem.getFSImage();
+      final FSEditLog editLog = fsimage.getEditLog();
+      for (int i = 0; i < NUM_EDIT_LOG_ROLLS; i++){
+        editLog.logSetReplication("fakefile" + i, (short)(i % 3));
+        assertExistsInStorageDirs(
+            cluster, NameNodeDirType.EDITS,
+            NNStorage.getInProgressEditsFileName((i * 3) + 1));
+        editLog.logSync();
+        editLog.rollEditLog();
+        assertExistsInStorageDirs(
+            cluster, NameNodeDirType.EDITS,
+            NNStorage.getFinalizedEditsFileName((i * 3) + 1, (i * 3) + 3));
+      }
+      editLog.close();
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+
+    // How long does it take to read through all these edit logs?
+    long startTime = System.currentTimeMillis();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).
+          numDataNodes(NUM_DATA_NODES).build();
+      cluster.waitActive();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+    long endTime = System.currentTimeMillis();
+    double delta = ((float)(endTime - startTime)) / 1000.0;
+    LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
+        NUM_EDIT_LOG_ROLLS, delta));
+  }
 }

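EditLogValidation no longer exposes a transaction count in these hunks; the tests derive it from the inclusive txid range as (endTxId - firstTxId) + 1. A quick worked check, using the txids 11 and 40 that appear in the updated gap message:

    public class TxnCountArithmetic {
      public static void main(String[] args) {
        long firstTxId = 11;
        long endTxId = 40;
        // Inclusive range: txids 11..40 span 30 transactions.
        long numRead = (endTxId - firstTxId) + 1;
        System.out.println(numRead); // 30
      }
    }
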
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Wed Jun  6 00:17:38 2012
@@ -40,8 +40,6 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestEditLogFileOutputStream {
-  
-  private final static long PREALLOCATION_LENGTH = (1024 * 1024) + 4;
   private final static int HEADER_LEN = 17;
   private static final File TEST_EDITS =
     new File(System.getProperty("test.build.data","/tmp"),
@@ -51,24 +49,25 @@ public class TestEditLogFileOutputStream
   public void deleteEditsFile() {
     TEST_EDITS.delete();
   }
-  
+
   @Test
   public void testPreallocation() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
         .build();
 
+    final long START_TXID = 1;
     StorageDirectory sd = cluster.getNameNode().getFSImage()
       .getStorage().getStorageDir(0);
-    File editLog = NNStorage.getInProgressEditsFile(sd, 1);
+    File editLog = NNStorage.getInProgressEditsFile(sd, START_TXID);
 
     EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog);
     assertEquals("Edit log should contain a header as valid length",
         HEADER_LEN, validation.getValidLength());
-    assertEquals(1, validation.getNumTransactions());
+    assertEquals(validation.getEndTxId(), START_TXID);
     assertEquals("Edit log should have 1MB pre-allocated, plus 4 bytes " +
         "for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     
 
     cluster.getFileSystem().mkdirs(new Path("/tmp"),
@@ -79,10 +78,10 @@ public class TestEditLogFileOutputStream
     assertTrue("Edit log should have more valid data after writing a txn " +
         "(was: " + oldLength + " now: " + validation.getValidLength() + ")",
         validation.getValidLength() > oldLength);
-    assertEquals(2, validation.getNumTransactions());
+    assertEquals(1, validation.getEndTxId() - START_TXID);
 
     assertEquals("Edit log should be 1MB long, plus 4 bytes for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     // 256 blocks for the 1MB of preallocation space
     assertTrue("Edit log disk space used should be at least 257 blocks",
         256 * 4096 <= new DU(editLog, conf).getUsed());

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Jun  6 00:17:38 2012
@@ -22,12 +22,15 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -38,16 +41,23 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
 public class TestFSEditLogLoader {
   
   static {
@@ -152,108 +162,6 @@ public class TestFSEditLogLoader {
   }
   
   /**
-   * Test that the valid number of transactions can be counted from a file.
-   * @throws IOException 
-   */
-  @Test
-  public void testCountValidTransactions() throws IOException {
-    File testDir = new File(TEST_DIR, "testCountValidTransactions");
-    File logFile = new File(testDir,
-        NNStorage.getInProgressEditsFileName(1));
-    
-    // Create a log file, and return the offsets at which each
-    // transaction starts.
-    FSEditLog fsel = null;
-    final int NUM_TXNS = 30;
-    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
-    try {
-      fsel = FSImageTestUtil.createStandaloneEditLog(testDir);
-      fsel.openForWrite();
-      assertTrue("should exist: " + logFile, logFile.exists());
-      
-      for (int i = 0; i < NUM_TXNS; i++) {
-        long trueOffset = getNonTrailerLength(logFile);
-        long thisTxId = fsel.getLastWrittenTxId() + 1;
-        offsetToTxId.put(trueOffset, thisTxId);
-        System.err.println("txid " + thisTxId + " at offset " + trueOffset);
-        fsel.logDelete("path" + i, i);
-        fsel.logSync();
-      }
-    } finally {
-      if (fsel != null) {
-        fsel.close();
-      }
-    }
-
-    // The file got renamed when the log was closed.
-    logFile = testDir.listFiles()[0];
-    long validLength = getNonTrailerLength(logFile);
-
-    // Make sure that uncorrupted log has the expected length and number
-    // of transactions.
-    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
-    assertEquals(NUM_TXNS + 2, validation.getNumTransactions());
-    assertEquals(validLength, validation.getValidLength());
-    
-    // Back up the uncorrupted log
-    File logFileBak = new File(testDir, logFile.getName() + ".bak");
-    Files.copy(logFile, logFileBak);
-
-    // Corrupt the log file in various ways for each txn
-    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
-      long txOffset = entry.getKey();
-      long txid = entry.getValue();
-      
-      // Restore backup, truncate the file exactly before the txn
-      Files.copy(logFileBak, logFile);
-      truncateFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when truncating to length " + txOffset,
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, truncate the file with one byte in the txn,
-      // also isn't valid
-      Files.copy(logFileBak, logFile);
-      truncateFile(logFile, txOffset + 1);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when truncating to length " + (txOffset + 1),
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, corrupt the txn opcode
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when corrupting txn opcode at " + txOffset,
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-
-      // Restore backup, corrupt a byte a few bytes into the txn
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, txOffset+5);
-      validation = EditLogFileInputStream.validateEditLog(logFile);
-      assertEquals("Failed when corrupting txn data at " + (txOffset+5),
-          txid - 1, validation.getNumTransactions());
-      assertEquals(txOffset, validation.getValidLength());
-    }
-    
-    // Corrupt the log at every offset to make sure that validation itself
-    // never throws an exception, and that the calculated lengths are monotonically
-    // increasing
-    long prevNumValid = 0;
-    for (long offset = 0; offset < validLength; offset++) {
-      Files.copy(logFileBak, logFile);
-      corruptByteInFile(logFile, offset);
-      EditLogValidation val = EditLogFileInputStream.validateEditLog(logFile);
-      assertTrue(String.format("%d should have been >= %d",
-          val.getNumTransactions(), prevNumValid),
-          val.getNumTransactions() >= prevNumValid);
-      prevNumValid = val.getNumTransactions();
-    }
-  }
-
-  /**
    * Corrupt the byte at the given offset in the given file,
    * by subtracting 1 from it.
    */
@@ -316,4 +224,118 @@ public class TestFSEditLogLoader {
       fis.close();
     }
   }
+
+  @Test
+  public void testStreamLimiter() throws IOException {
+    final File LIMITER_TEST_FILE = new File(TEST_DIR, "limiter.test");
+    
+    FileOutputStream fos = new FileOutputStream(LIMITER_TEST_FILE);
+    try {
+      fos.write(0x12);
+      fos.write(0x12);
+      fos.write(0x12);
+    } finally {
+      fos.close();
+    }
+    
+    FileInputStream fin = new FileInputStream(LIMITER_TEST_FILE);
+    BufferedInputStream bin = new BufferedInputStream(fin);
+    FSEditLogLoader.PositionTrackingInputStream tracker = 
+        new FSEditLogLoader.PositionTrackingInputStream(bin);
+    try {
+      tracker.setLimit(2);
+      tracker.mark(100);
+      tracker.read();
+      tracker.read();
+      try {
+        tracker.read();
+        fail("expected to get IOException after reading past the limit");
+      } catch (IOException e) {
+      }
+      tracker.reset();
+      tracker.mark(100);
+      byte arr[] = new byte[3];
+      try {
+        tracker.read(arr);
+        fail("expected to get IOException after reading past the limit");
+      } catch (IOException e) {
+      }
+      tracker.reset();
+      arr = new byte[2];
+      tracker.read(arr);
+    } finally {
+      tracker.close();
+    }
+  }
+
+  /**
+   * Create an unfinalized edit log for testing purposes
+   *
+   * @param testDir           Directory to create the edit log in
+   * @param numTx             Number of transactions to add to the new edit log
+   * @param offsetToTxId      A map from transaction IDs to offsets in the 
+   *                          edit log file.
+   * @return                  The new edit log file name.
+   * @throws IOException
+   */
+  static private File prepareUnfinalizedTestEditLog(File testDir, int numTx,
+      SortedMap<Long, Long> offsetToTxId) throws IOException {
+    File inProgressFile = new File(testDir, NNStorage.getInProgressEditsFileName(1));
+    FSEditLog fsel = null, spyLog = null;
+    try {
+      fsel = FSImageTestUtil.createStandaloneEditLog(testDir);
+      spyLog = spy(fsel);
+      // Normally, the in-progress edit log would be finalized by
+      // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+      // disable that here.
+      doNothing().when(spyLog).endCurrentLogSegment(true);
+      spyLog.openForWrite();
+      assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
+      
+      for (int i = 0; i < numTx; i++) {
+        long trueOffset = getNonTrailerLength(inProgressFile);
+        long thisTxId = spyLog.getLastWrittenTxId() + 1;
+        offsetToTxId.put(trueOffset, thisTxId);
+        System.err.println("txid " + thisTxId + " at offset " + trueOffset);
+        spyLog.logDelete("path" + i, i);
+        spyLog.logSync();
+      }
+    } finally {
+      if (spyLog != null) {
+        spyLog.close();
+      } else if (fsel != null) {
+        fsel.close();
+      }
+    }
+    return inProgressFile;
+  }
+
+  @Test
+  public void testValidateEditLogWithCorruptHeader() throws IOException {
+    File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader");
+    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+    File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId);
+    RandomAccessFile rwf = new RandomAccessFile(logFile, "rw");
+    try {
+      rwf.seek(0);
+      rwf.writeLong(42); // corrupt header
+    } finally {
+      rwf.close();
+    }
+    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
+    assertTrue(validation.hasCorruptHeader());
+  }
+
+  @Test
+  public void testValidateEmptyEditLog() throws IOException {
+    File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
+    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+    File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
+    // Truncate the file so that there is nothing except the header
+    truncateFile(logFile, 4);
+    EditLogValidation validation =
+        EditLogFileInputStream.validateEditLog(logFile);
+    assertTrue(!validation.hasCorruptHeader());
+    assertEquals(HdfsConstants.INVALID_TXID, validation.getEndTxId());
+  }
 }

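testStreamLimiter above pins down the decorator's contract: reads past a configured byte budget throw an IOException, and reset() restores both the position and the budget saved at mark(). A stand-in over plain java.io that satisfies the same contract; the real class is FSEditLogLoader.PositionTrackingInputStream, which additionally tracks the byte position:

    import java.io.ByteArrayInputStream;
    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    class LimitedInputStream extends FilterInputStream {
      private long limit = Long.MAX_VALUE; // remaining bytes allowed
      private long markedLimit;

      LimitedInputStream(InputStream in) { super(in); }

      void setLimit(long limit) { this.limit = limit; }

      @Override public synchronized void mark(int readlimit) {
        super.mark(readlimit);
        markedLimit = limit; // save the budget along with the position
      }

      @Override public synchronized void reset() throws IOException {
        super.reset();
        limit = markedLimit; // restoring position restores the budget too
      }

      @Override public int read() throws IOException {
        if (limit <= 0) throw new IOException("read past limit");
        int b = super.read();
        if (b != -1) limit--;
        return b;
      }

      @Override public int read(byte[] b, int off, int len) throws IOException {
        if (len > limit) throw new IOException("read past limit");
        int n = super.read(b, off, len);
        if (n > 0) limit -= n;
        return n;
      }

      public static void main(String[] args) throws IOException {
        LimitedInputStream in = new LimitedInputStream(
            new ByteArrayInputStream(new byte[] { 0x12, 0x12, 0x12 }));
        in.setLimit(2);
        in.mark(100);
        in.read();
        in.read();
        try { in.read(); } catch (IOException expected) { /* past the limit */ }
        in.reset();           // back to the mark, budget restored to 2
        in.read(new byte[2]); // fits within the restored limit
        in.close();
      }
    }
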
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Wed Jun  6 00:17:38 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.*;
 
 import java.net.URI;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Iterator;
@@ -29,10 +30,14 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import org.junit.Test;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
@@ -40,10 +45,52 @@ import static org.apache.hadoop.hdfs.ser
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.TreeMultiset;
 import com.google.common.base.Joiner;
 
 public class TestFileJournalManager {
+  static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
 
+  /**
+   * Find out how many transactions we can read from a
+   * FileJournalManager, starting at a given transaction ID.
+   * 
+   * @param jm              The journal manager
+   * @param fromTxId        Transaction ID to start at
+   * @param inProgressOk    Should we consider edit logs that are not finalized?
+   * @param abortOnGap      Stop counting and return as soon as a gap in
+   *                        transaction IDs is detected
+   * @return                The number of transactions
+   * @throws IOException
+   */
+  static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
+      boolean inProgressOk, boolean abortOnGap) throws IOException {
+    long numTransactions = 0, txId = fromTxId;
+    final TreeMultiset<EditLogInputStream> allStreams =
+        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
+
+    try {
+      for (EditLogInputStream elis : allStreams) {
+        elis.skipUntil(txId);
+        while (true) {
+          FSEditLogOp op = elis.readOp();
+          if (op == null) {
+            break;
+          }
+          if (abortOnGap && (op.getTransactionId() != txId)) {
+            LOG.info("getNumberOfTransactions: detected gap at txId " +
+                fromTxId);
+            return numTransactions;
+          }
+          txId = op.getTransactionId() + 1;
+          numTransactions++;
+        }
+      }
+    } finally {
+      IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+    }
+    return numTransactions;
+  }
+  
   /** 
    * Test the normal operation of loading transactions from
    * file journal manager. 3 edits directories are setup without any
@@ -61,7 +108,7 @@ public class TestFileJournalManager {
     long numJournals = 0;
     for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
       FileJournalManager jm = new FileJournalManager(sd, storage);
-      assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+      assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
       numJournals++;
     }
     assertEquals(3, numJournals);
@@ -82,7 +129,7 @@ public class TestFileJournalManager {
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, 
-                 jm.getNumberOfTransactions(1, true));
+                 getNumberOfTransactions(jm, 1, true, false));
   }
 
   /**
@@ -104,16 +151,16 @@ public class TestFileJournalManager {
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
     
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
 
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1, true));
+    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
   }
 
   /** 
@@ -137,18 +184,18 @@ public class TestFileJournalManager {
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
     
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
 
     sd = dirs.next();
     jm = new FileJournalManager(sd, storage);
-    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1,
-        true));
+    assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
+        true, false));
   }
 
   /** 
@@ -198,24 +245,15 @@ public class TestFileJournalManager {
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
-    assertEquals(expectedTotalTxnCount, jm.getNumberOfTransactions(1, true));
+    assertEquals(expectedTotalTxnCount, getNumberOfTransactions(jm, 1,
+        true, false));
 
     long skippedTxns = (3*TXNS_PER_ROLL); // skip first 3 files
     long startingTxId = skippedTxns + 1; 
 
-    long numTransactionsToLoad = jm.getNumberOfTransactions(startingTxId, true);
-    long numLoaded = 0;
-    while (numLoaded < numTransactionsToLoad) {
-      EditLogInputStream editIn = jm.getInputStream(startingTxId, true);
-      FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(editIn);
-      long count = val.getNumTransactions();
-
-      editIn.close();
-      startingTxId += count;
-      numLoaded += count;
-    }
-
-    assertEquals(expectedTotalTxnCount - skippedTxns, numLoaded); 
+    long numLoadable = getNumberOfTransactions(jm, startingTxId,
+        true, false);
+    assertEquals(expectedTotalTxnCount - skippedTxns, numLoadable); 
   }
 
   /**
@@ -236,8 +274,8 @@ public class TestFileJournalManager {
     // 10 rolls, so 11 rolled files, 110 txids total.
     final int TOTAL_TXIDS = 10 * 11;
     for (int txid = 1; txid <= TOTAL_TXIDS; txid++) {
-      assertEquals((TOTAL_TXIDS - txid) + 1, jm.getNumberOfTransactions(txid,
-          true));
+      assertEquals((TOTAL_TXIDS - txid) + 1, getNumberOfTransactions(jm, txid,
+          true, false));
     }
   }
 
@@ -269,19 +307,13 @@ public class TestFileJournalManager {
     assertTrue(files[0].delete());
     
     FileJournalManager jm = new FileJournalManager(sd, storage);
-    assertEquals(startGapTxId-1, jm.getNumberOfTransactions(1, true));
+    assertEquals(startGapTxId-1, getNumberOfTransactions(jm, 1, true, true));
 
-    try {
-      jm.getNumberOfTransactions(startGapTxId, true);
-      fail("Should have thrown an exception by now");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains(
-          "Gap in transactions, max txnid is 110, 0 txns from 31", ioe);
-    }
+    assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
 
     // rolled 10 times so there should be 11 files.
     assertEquals(11*TXNS_PER_ROLL - endGapTxId, 
-                 jm.getNumberOfTransactions(endGapTxId + 1, true));
+                 getNumberOfTransactions(jm, endGapTxId + 1, true, true));
   }
 
   /** 
@@ -308,7 +340,7 @@ public class TestFileJournalManager {
 
     FileJournalManager jm = new FileJournalManager(sd, storage);
     assertEquals(10*TXNS_PER_ROLL+1, 
-                 jm.getNumberOfTransactions(1, true));
+                 getNumberOfTransactions(jm, 1, true, false));
   }
 
   @Test
@@ -345,6 +377,33 @@ public class TestFileJournalManager {
     FileJournalManager.matchEditLogs(badDir);
   }
   
+  private static EditLogInputStream getJournalInputStream(JournalManager jm,
+      long txId, boolean inProgressOk) throws IOException {
+    final TreeMultiset<EditLogInputStream> allStreams =
+        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    jm.selectInputStreams(allStreams, txId, inProgressOk);
+    try {
+      for (Iterator<EditLogInputStream> iter = allStreams.iterator();
+          iter.hasNext();) {
+        EditLogInputStream elis = iter.next();
+        if (elis.getFirstTxId() > txId) {
+          break;
+        }
+        if (elis.getLastTxId() < txId) {
+          iter.remove();
+          elis.close();
+          continue;
+        }
+        elis.skipUntil(txId);
+        iter.remove();
+        return elis;
+      }
+    } finally {
+      IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+    }
+    return null;
+  }
+    
   /**
   * Make sure that we start reading the correct op when we request a stream
    * with a txid in the middle of an edit log file.
@@ -359,7 +418,7 @@ public class TestFileJournalManager {
     
     FileJournalManager jm = new FileJournalManager(sd, storage);
     
-    EditLogInputStream elis = jm.getInputStream(5, true);
+    EditLogInputStream elis = getJournalInputStream(jm, 5, true);
     FSEditLogOp op = elis.readOp();
     assertEquals("read unexpected op", 5, op.getTransactionId());
   }
@@ -381,9 +440,9 @@ public class TestFileJournalManager {
     FileJournalManager jm = new FileJournalManager(sd, storage);
     
     // If we exclude the in-progress stream, we should only have 100 tx.
-    assertEquals(100, jm.getNumberOfTransactions(1, false));
+    assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
     
-    EditLogInputStream elis = jm.getInputStream(90, false);
+    EditLogInputStream elis = getJournalInputStream(jm, 90, false);
     FSEditLogOp lastReadOp = null;
     while ((lastReadOp = elis.readOp()) != null) {
       assertTrue(lastReadOp.getTransactionId() <= 100);

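For reference, a minimal sketch (not part of this patch) of the caller-side
pattern that replaces JournalManager#getNumberOfTransactions: collect streams
with selectInputStreams, read ops in order, and always clean the streams up.
It assumes the org.apache.hadoop.hdfs.server.namenode package so that the
package-private types used by the test above resolve.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

import com.google.common.collect.TreeMultiset;

class EditLogCountSketch {
  /** Count the ops readable from fromTxId onward, finalized segments only. */
  static long countOps(FileJournalManager jm, long fromTxId)
      throws IOException {
    final TreeMultiset<EditLogInputStream> streams =
        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
    jm.selectInputStreams(streams, fromTxId, false); // false: skip in-progress
    long count = 0;
    try {
      for (EditLogInputStream elis : streams) {
        elis.skipUntil(fromTxId);  // position at the requested transaction
        while (elis.readOp() != null) {
          count++;
        }
      }
    } finally {
      // Close every stream whether or not reading succeeded.
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
    return count;
  }
}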
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Jun  6 00:17:38 2012
@@ -18,21 +18,27 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
+
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.io.PrintWriter;
 import java.io.RandomAccessFile;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Random;
 import java.util.regex.Pattern;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -42,25 +48,30 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.Test;
 
 /**
  * A JUnit test for doing fsck
  */
-public class TestFsck extends TestCase {
+public class TestFsck {
   static final String auditLogFile = System.getProperty("test.build.dir",
       "build/test") + "/audit.log";
   
@@ -79,13 +90,15 @@ public class TestFsck extends TestCase {
     PrintStream out = new PrintStream(bStream, true);
     ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
     int errCode = ToolRunner.run(new DFSck(conf, out), path);
-    if (checkErrorCode)
+    if (checkErrorCode) {
       assertEquals(expectedErrCode, errCode);
+    }
     ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
     return bStream.toString();
   }
 
   /** do fsck */
+  @Test
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
@@ -158,6 +171,7 @@ public class TestFsck extends TestCase {
     assertNull("Unexpected event in audit log", reader.readLine());
   }
   
+  @Test
   public void testFsckNonExistent() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
@@ -180,6 +194,7 @@ public class TestFsck extends TestCase {
   }
 
   /** Test fsck with permission set on inodes */
+  @Test
   public void testFsckPermission() throws Exception {
     final DFSTestUtil util = new DFSTestUtil(getClass().getSimpleName(), 20, 3, 8*1024);
     final Configuration conf = new HdfsConfiguration();
@@ -227,6 +242,7 @@ public class TestFsck extends TestCase {
     }
   }
 
+  @Test
   public void testFsckMoveAndDelete() throws Exception {
     final int MAX_MOVE_TRIES = 5;
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
@@ -300,6 +316,7 @@ public class TestFsck extends TestCase {
     }
   }
   
+  @Test
   public void testFsckOpenFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 4, 3, 8*1024); 
     MiniDFSCluster cluster = null;
@@ -350,6 +367,7 @@ public class TestFsck extends TestCase {
     }
   }
 
+  @Test
   public void testCorruptBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -426,6 +444,7 @@ public class TestFsck extends TestCase {
    * 
    * @throws Exception
    */
+  @Test
   public void testFsckError() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -460,6 +479,7 @@ public class TestFsck extends TestCase {
   }
   
   /** check if option -list-corruptfiles of fsck command works properly */
+  @Test
   public void testFsckListCorruptFilesBlocks() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -529,6 +549,7 @@ public class TestFsck extends TestCase {
    * Test that the fsck command prints the proper usage message when given
    * illegal arguments.
    */
+  @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -560,4 +581,73 @@ public class TestFsck extends TestCase {
       }
     }
   }
+  
+  /**
+   * Tests that the numbers of missing and expected block replicas are correct.
+   * @throws IOException
+   */
+  @Test
+  public void testFsckMissingReplicas() throws IOException {
+    // Desired replication factor
+    // Set this higher than NUM_REPLICAS so it's under-replicated
+    final short REPL_FACTOR = 2;
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+    // Number of blocks to write
+    final short NUM_BLOCKS = 3;
+    // Set a small-ish blocksize
+    final long blockSize = 512;
+    
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem dfs = null;
+    
+    try {
+      // Start up a minicluster
+      cluster = 
+          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
+      assertNotNull("Failed Cluster Creation", cluster);
+      cluster.waitClusterUp();
+      dfs = (DistributedFileSystem) cluster.getFileSystem();
+      assertNotNull("Failed to get FileSystem", dfs);
+      
+      // Create a file that will be intentionally under-replicated
+      final String pathString = "/testfile";
+      final Path path = new Path(pathString);
+      long fileLen = blockSize * NUM_BLOCKS;
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+      
+      // Set up an fsck instance to inspect the under-replicated file
+      NameNode namenode = cluster.getNameNode();
+      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+          .getDatanodeManager().getNetworkTopology();
+      Map<String,String[]> pmap = new HashMap<String, String[]>();
+      Writer result = new StringWriter();
+      PrintWriter out = new PrintWriter(result, true);
+      InetAddress remoteAddress = InetAddress.getLocalHost();
+      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
+          NUM_REPLICAS, (short)1, remoteAddress);
+      
+      // Run the fsck and check the Result
+      final HdfsFileStatus file = 
+          namenode.getRpcServer().getFileInfo(pathString);
+      assertNotNull(file);
+      Result res = new Result(conf);
+      fsck.check(pathString, file, res);
+      // Also print the output from the fsck, for ex post facto sanity checks
+      System.out.println(result.toString());
+      assertEquals((NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS),
+          res.missingReplicas);
+      assertEquals(NUM_BLOCKS*REPL_FACTOR, res.numExpectedReplicas);
+    } finally {
+      if (dfs != null) {
+        dfs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

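For context, a minimal sketch (not part of this patch) of the in-process fsck
invocation that the tests above are built around; the path and configuration
are placeholders supplied by the caller.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckSketch {
  /** Run fsck against a path in-process and return the report it printed. */
  static String fsckReport(Configuration conf, String path) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes, true);
    // DFSck is a Tool, so ToolRunner handles the generic options parsing.
    int errCode = ToolRunner.run(new DFSck(conf, out), new String[] { path });
    out.flush();
    return "exit=" + errCode + "\n" + bytes.toString();
  }
}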
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java Wed Jun  6 00:17:38 2012
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import static org.mockito.Mockito.mock;
@@ -26,9 +24,9 @@ import static org.junit.Assert.*;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Writable;
 
 import java.net.URI;
+import java.util.Collection;
 import java.io.IOException;
 
 public class TestGenericJournalConf {
@@ -144,15 +142,8 @@ public class TestGenericJournalConf {
     }
 
     @Override
-    public EditLogInputStream getInputStream(long fromTxnId, boolean inProgressOk)
-        throws IOException {
-      return null;
-    }
-
-    @Override
-    public long getNumberOfTransactions(long fromTxnId, boolean inProgressOk)
-        throws IOException {
-      return 0;
+    public void selectInputStreams(Collection<EditLogInputStream> streams,
+        long fromTxnId, boolean inProgressOk) {
     }
 
     @Override

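A hedged sketch (not part of this patch) of what the new single entry point
means for a journal plugin; the class is deliberately abstract so that only
the method this patch changes is shown.

package org.apache.hadoop.hdfs.server.namenode;

import java.util.Collection;

abstract class SketchJournalManager implements JournalManager {
  @Override
  public void selectInputStreams(Collection<EditLogInputStream> streams,
      long fromTxnId, boolean inProgressOk) {
    // A journal with no segments adds nothing, exactly like the dummy
    // implementation in the test above.  A real plugin would add one
    // EditLogInputStream per segment that contains transactions at or
    // beyond fromTxnId, skipping in-progress segments when inProgressOk
    // is false; callers now count and read transactions themselves.
  }
}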
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java Wed Jun  6 00:17:38 2012
@@ -21,20 +21,29 @@ import static org.junit.Assert.*;
 
 import java.io.IOException;
 
+import javax.servlet.ServletContext;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Test;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
 
 public class TestGetImageServlet {
   
   @Test
-  public void testIsValidRequestorWithHa() throws IOException {
+  public void testIsValidRequestor() throws IOException {
     Configuration conf = new HdfsConfiguration();
+    KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
     
     // Set up generic HA configs.
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
         "ns1"), "nn1,nn2");
     
@@ -53,8 +62,33 @@ public class TestGetImageServlet {
     // Initialize this conf object as though we're running on NN1.
     NameNode.initializeGenericKeys(conf, "ns1", "nn1");
     
+    AccessControlList acls = Mockito.mock(AccessControlList.class);
+    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    
     // Make sure that NN2 is considered a valid fsimage/edits requestor.
-    assertTrue(GetImageServlet.isValidRequestor("hdfs/host2@TEST-REALM.COM",
-        conf));
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "hdfs/host2@TEST-REALM.COM", conf));
+    
+    // Mark atm as an admin.
+    Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
+      @Override
+      public boolean matches(Object argument) {
+        return ((UserGroupInformation) argument).getShortUserName().equals("atm");
+      }
+    }))).thenReturn(true);
+    
+    // Make sure that NN2 is still considered a valid requestor.
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "hdfs/host2@TEST-REALM.COM", conf));
+    
+    // Make sure an admin is considered a valid requestor.
+    assertTrue(GetImageServlet.isValidRequestor(context,
+        "atm@TEST-REALM.COM", conf));
+    
+    // Make sure other users are *not* considered valid requestors.
+    assertFalse(GetImageServlet.isValidRequestor(context,
+        "todd@TEST-REALM.COM", conf));
   }
 }

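As a side note, the ACL mock above can also be a real AccessControlList; a
small sketch, assuming the servlet consults HttpServer.ADMINS_ACL from the
ServletContext as the test's stubbing implies.

import javax.servlet.ServletContext;

import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.mockito.Mockito;

public class AdminAclContextSketch {
  /** Build a ServletContext whose admin ACL allows exactly one user. */
  static ServletContext contextWithAdmin(String adminShortName) {
    // A real ACL instead of a mock: "atm" would match the test above.
    AccessControlList acls = new AccessControlList(adminShortName);
    ServletContext context = Mockito.mock(ServletContext.class);
    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
    return context;
  }
}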
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Wed Jun  6 00:17:38 2012
@@ -25,6 +25,8 @@ import java.util.HashSet;
 import java.util.Set;
 
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +39,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -213,15 +214,129 @@ public class TestNameNodeRecovery {
   public void testSkipEdit() throws IOException {
     runEditLogTest(new EltsTestGarbageInEditLog());
   }
-  
-  /** Test that we can successfully recover from a situation where the last
-   * entry in the edit log has been truncated. */
-  @Test(timeout=180000)
-  public void testRecoverTruncatedEditLog() throws IOException {
+
+  /**
+   * An algorithm for corrupting an edit log.
+   */
+  static interface Corruptor {
+    /**
+     * Corrupt an edit log file.
+     *
+     * @param editFile   The edit log file
+     */
+    public void corrupt(File editFile) throws IOException;
+
+    /**
+     * Report whether we need to read the log in recovery mode.
+     *
+     * @param finalized  True if the edit log in question is finalized.
+     *                   We're a little more lax about reading unfinalized
+     *                   logs.  We will allow a small amount of garbage at
+     *                   the end.  In a finalized log, every byte must be
+     *                   perfect.
+     *
+     * @return           Whether we need to read the log in recovery mode
+     */
+    public boolean needRecovery(boolean finalized);
+
+    /**
+     * Get the name of this corruptor.
+     *
+     * @return           The Corruptor name
+     */
+    public String getName();
+  }
+
+  static class TruncatingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Corrupt the last edit
+      long fileLen = editFile.length();
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.setLength(fileLen - 1);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return finalized;
+    }
+
+    @Override
+    public String getName() {
+      return "truncated";
+    }
+  }
+
+  static class PaddingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      for (int i = 0; i < 129; i++) {
+        rwf.write((byte)0);
+      }
+      rwf.write(0xd);
+      rwf.write(0xe);
+      rwf.write(0xa);
+      rwf.write(0xd);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      // With finalized edit logs, we ignore what's at the end as long as we
+      // can make it to the correct transaction ID.
+      // With unfinalized edit logs, the finalization process ignores garbage
+      // at the end.
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "padFatal";
+    }
+  }
+
+  static class SafePaddingCorruptor implements Corruptor {
+    private byte padByte;
+
+    public SafePaddingCorruptor(byte padByte) {
+      this.padByte = padByte;
+      assert ((this.padByte == 0) || (this.padByte == -1));
+    }
+
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      rwf.write((byte)-1);
+      for (int i = 0; i < 1024; i++) {
+        rwf.write(padByte);
+      }
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "pad" + ((int)padByte);
+    }
+  }
+
+  static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
+      throws IOException {
     final String TEST_PATH = "/test/path/dir";
-    final int NUM_TEST_MKDIRS = 10;
-    
-    // start a cluster 
+    final String TEST_PATH2 = "/second/dir";
+    final boolean needRecovery = corruptor.needRecovery(finalize);
+
+    // start a cluster
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
@@ -230,12 +345,20 @@ public class TestNameNodeRecovery {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .build();
       cluster.waitActive();
+      if (!finalize) {
+        // Normally, the in-progress edit log would be finalized by
+        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+        // disable that here.
+        FSEditLog spyLog =
+            spy(cluster.getNameNode().getFSImage().getEditLog());
+        doNothing().when(spyLog).endCurrentLogSegment(true);
+        cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
+      }
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
       FSImage fsimage = namesystem.getFSImage();
-      for (int i = 0; i < NUM_TEST_MKDIRS; i++) {
-        fileSys.mkdirs(new Path(TEST_PATH));
-      }
+      fileSys.mkdirs(new Path(TEST_PATH));
+      fileSys.mkdirs(new Path(TEST_PATH2));
       sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
     } finally {
       if (cluster != null) {
@@ -246,13 +369,12 @@ public class TestNameNodeRecovery {
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
     assertTrue("Should exist: " + editFile, editFile.exists());
 
-    // Corrupt the last edit
-    long fileLen = editFile.length();
-    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
-    rwf.setLength(fileLen - 1);
-    rwf.close();
-    
-    // Make sure that we can't start the cluster normally before recovery
+    // Corrupt the edit log
+    LOG.info("corrupting edit log file '" + editFile + "'");
+    corruptor.corrupt(editFile);
+
+    // If needRecovery == true, make sure that we can't start the
+    // cluster normally before recovery
     cluster = null;
     try {
       LOG.debug("trying to start normally (this should fail)...");
@@ -260,16 +382,24 @@ public class TestNameNodeRecovery {
           .format(false).build();
       cluster.waitActive();
       cluster.shutdown();
-      fail("expected the truncated edit log to prevent normal startup");
+      if (needRecovery) {
+        fail("expected the corrupted edit log to prevent normal startup");
+      }
     } catch (IOException e) {
-      // success
+      if (!needRecovery) {
+        LOG.error("Got unexpected failure with corruptor " +
+            corruptor.getName(), e);
+        fail("got unexpected exception " + e.getMessage());
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
-    // Perform recovery
+
+    // Perform NameNode recovery.
+    // Even if there was nothing wrong previously (needRecovery == false),
+    // this should still work fine.
     cluster = null;
     try {
       LOG.debug("running recovery...");
@@ -277,22 +407,23 @@ public class TestNameNodeRecovery {
           .format(false).startupOption(recoverStartOpt).build();
     } catch (IOException e) {
       fail("caught IOException while trying to recover. " +
-          "message was " + e.getMessage() + 
+          "message was " + e.getMessage() +
           "\nstack trace\n" + StringUtils.stringifyException(e));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
+
     // Make sure that we can start the cluster normally after recovery
     cluster = null;
     try {
       LOG.debug("starting cluster normally after recovery...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .format(false).build();
-      LOG.debug("testRecoverTruncatedEditLog: successfully recovered the " +
-          "truncated edit log");
+      LOG.debug("successfully recovered the edit log corrupted by " +
+          corruptor.getName());
+      cluster.waitActive();
       assertTrue(cluster.getFileSystem().exists(new Path(TEST_PATH)));
     } catch (IOException e) {
       fail("failed to recover.  Error message: " + e.getMessage());
@@ -302,4 +433,36 @@ public class TestNameNodeRecovery {
       }
     }
   }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been truncated. */
+  @Test(timeout=180000)
+  public void testRecoverTruncatedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), true);
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), false);
+  }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been padded with garbage. */
+  @Test(timeout=180000)
+  public void testRecoverPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), true);
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with zero bytes. */
+  @Test(timeout=180000)
+  public void testRecoverZeroPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with 0xff bytes. */
+  @Test(timeout=180000)
+  public void testRecoverNegativeOnePaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), false);
+  }
 }

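A hedged sketch (not part of this patch) of what one more Corruptor in the
style above could look like; whether mid-file damage is handled by recovery
exactly this way is an assumption that would need verifying against FSEditLog.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

class MiddleByteCorruptor implements TestNameNodeRecovery.Corruptor {
  @Override
  public void corrupt(File editFile) throws IOException {
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    try {
      rwf.seek(editFile.length() / 2);  // damage a byte in the middle
      rwf.write(0xff);
    } finally {
      rwf.close();
    }
  }

  @Override
  public boolean needRecovery(boolean finalized) {
    return true;  // assumption: mid-file damage always forces recovery mode
  }

  @Override
  public String getName() {
    return "middleByte";
  }
}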

