hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1158072 [6/7] - in /hadoop/common/branches/HDFS-1623/hdfs: ./ ivy/ src/c++/libhdfs/ src/contrib/ src/contrib/fuse-dfs/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/ser...
Date: Tue, 16 Aug 2011 00:37:25 GMT
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java Tue Aug 16 00:37:15 2011
@@ -19,14 +19,15 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.HttpURLConnection;
 import java.util.Random;
 
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
+import org.junit.Test;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import static org.junit.Assert.*;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -39,26 +40,48 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.util.ServletUtil;
 import org.apache.log4j.Level;
 
-/**
- * Unittest for HftpFileSystem.
- *
- */
-public class TestHftpFileSystem extends TestCase {
+public class TestHftpFileSystem {
   private static final Random RAN = new Random();
-  private static final Path TEST_FILE = new Path("/testfile+1");
   
   private static Configuration config = null;
   private static MiniDFSCluster cluster = null;
   private static FileSystem hdfs = null;
   private static HftpFileSystem hftpFs = null;
   private static String blockPoolId = null;
-  
-  /**
-   * Setup hadoop mini-cluster for test.
-   */
-  private static void oneTimeSetUp() throws IOException {
+
+  private static Path[] TEST_PATHS = new Path[] {
+      // URI does not encode, Request#getPathInfo returns /foo
+      new Path("/foo;bar"),
+
+      // URI does not encode, Request#getPathInfo returns verbatim
+      new Path("/foo+"),
+      new Path("/foo+bar/foo+bar"),
+      new Path("/foo=bar/foo=bar"),
+      new Path("/foo,bar/foo,bar"),
+      new Path("/foo@bar/foo@bar"),
+      new Path("/foo&bar/foo&bar"),
+      new Path("/foo$bar/foo$bar"),
+      new Path("/foo_bar/foo_bar"),
+      new Path("/foo~bar/foo~bar"),
+      new Path("/foo.bar/foo.bar"),
+      new Path("/foo../bar/foo../bar"),
+      new Path("/foo.../bar/foo.../bar"),
+      new Path("/foo'bar/foo'bar"),
+      new Path("/foo#bar/foo#bar"),
+      new Path("/foo!bar/foo!bar"),
+      // HDFS file names may not contain ":"
+
+      // URI percent encodes, Request#getPathInfo decodes
+      new Path("/foo bar/foo bar"),
+      new Path("/foo?bar/foo?bar"),
+      new Path("/foo\">bar/foo\">bar"),
+    };
+
+  @BeforeClass
+  public static void setUp() throws IOException {
     ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
 
     final long seed = RAN.nextLong();
@@ -67,66 +90,73 @@ public class TestHftpFileSystem extends 
 
     config = new Configuration();
     config.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
-
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
     hdfs = cluster.getFileSystem();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();
-    final String hftpuri = 
+    final String hftpUri = 
       "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    hftpFs = (HftpFileSystem) new Path(hftpuri).getFileSystem(config);
+    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
   }
   
-  /**
-   * Shutdown the hadoop mini-cluster.
-   */
-  private static void oneTimeTearDown() throws IOException {
+  @AfterClass
+  public static void tearDown() throws IOException {
     hdfs.close();
     hftpFs.close();
     cluster.shutdown();
   }
-  
-  public TestHftpFileSystem(String name) {
-    super(name);
-  }
 
   /**
-   * For one time setup / teardown.
+   * Test file creation and access with file names that need encoding. 
    */
-  public static Test suite() {
-    TestSuite suite = new TestSuite();
-    
-    suite.addTestSuite(TestHftpFileSystem.class);
-    
-    return new TestSetup(suite) {
-      @Override
-      protected void setUp() throws IOException {
-        oneTimeSetUp();
-      }
-      
-      @Override
-      protected void tearDown() throws IOException {
-        oneTimeTearDown();
-      }
-    };
+  @Test
+  public void testFileNameEncoding() throws IOException, URISyntaxException {
+    for (Path p : TEST_PATHS) {
+      // Create and access the path (data and streamFile servlets)
+      FSDataOutputStream out = hdfs.create(p, true);
+      out.writeBytes("0123456789");
+      out.close();
+      FSDataInputStream in = hftpFs.open(p);
+      assertEquals('0', in.read());
+
+      // Check that the file status matches the path. Hftp returns a FileStatus
+      // with the entire URI; extract the path part.
+      assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri().getPath()));
+
+      // Test list status (listPath servlet)
+      assertEquals(1, hftpFs.listStatus(p).length);
+
+      // Test content summary (contentSummary servlet)
+      assertNotNull("No content summary", hftpFs.getContentSummary(p));
+
+      // Test checksums (fileChecksum and getFileChecksum servlets)
+      assertNotNull("No file checksum", hftpFs.getFileChecksum(p));
+    }
   }
-  
-  public void testDataNodeRedirect() throws Exception {
-    if (hdfs.exists(TEST_FILE)) {
-      hdfs.delete(TEST_FILE, true);
+
+  private void testDataNodeRedirect(Path path) throws IOException {
+    // Create the file
+    if (hdfs.exists(path)) {
+      hdfs.delete(path, true);
     }
-    FSDataOutputStream out = hdfs.create(TEST_FILE, (short) 1);
+    FSDataOutputStream out = hdfs.create(path, (short)1);
     out.writeBytes("0123456789");
     out.close();
-    
+
+    // Get the path's block location so we can determine
+    // if we were redirected to the right DN.
     BlockLocation[] locations = 
-        hdfs.getFileBlockLocations(TEST_FILE, 0, 10);
-    
+        hdfs.getFileBlockLocations(path, 0, 10);
     String locationName = locations[0].getNames()[0];
-    URL u = hftpFs.getNamenodeFileURL(TEST_FILE);
+
+    // Connect to the NN to get redirected
+    URL u = hftpFs.getNamenodeURL(
+        "/data" + ServletUtil.encodePath(path.toUri().getPath()), 
+        "ugi=userx,groupy");
     HttpURLConnection conn = (HttpURLConnection)u.openConnection();
     HttpURLConnection.setFollowRedirects(true);
     conn.connect();
     conn.getInputStream();
+
     boolean checked = false;
     // Find the datanode that has the block according to locations
     // and check that the URL was redirected to this DN's info port
@@ -138,19 +168,32 @@ public class TestHftpFileSystem extends 
         assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
       }
     }
-    assertTrue("The test never checked that location of " + 
-              "the block and hftp desitnation are the same", checked);
+    assertTrue("The test never checked that location of " +
+               "the block and hftp desitnation are the same", checked);
+  }
+
+  /**
+   * Test that clients are redirected to the appropriate DN.
+   */
+  @Test
+  public void testDataNodeRedirect() throws IOException {
+    for (Path p : TEST_PATHS) {
+      testDataNodeRedirect(p);
+    }
   }
+
   /**
    * Tests getPos() functionality.
    */
-  public void testGetPos() throws Exception {
+  @Test
+  public void testGetPos() throws IOException {
+    final Path testFile = new Path("/testfile+1");
     // Write a test file.
-    FSDataOutputStream out = hdfs.create(TEST_FILE, true);
+    FSDataOutputStream out = hdfs.create(testFile, true);
     out.writeBytes("0123456789");
     out.close();
     
-    FSDataInputStream in = hftpFs.open(TEST_FILE);
+    FSDataInputStream in = hftpFs.open(testFile);
     
     // Test read().
     for (int i = 0; i < 5; ++i) {
@@ -175,17 +218,17 @@ public class TestHftpFileSystem extends 
     assertEquals(10, in.getPos());
     in.close();
   }
-  
+
   /**
    * Tests seek().
    */
-  public void testSeek() throws Exception {
-    // Write a test file.
-    FSDataOutputStream out = hdfs.create(TEST_FILE, true);
+  @Test
+  public void testSeek() throws IOException {
+    final Path testFile = new Path("/testfile+1");
+    FSDataOutputStream out = hdfs.create(testFile, true);
     out.writeBytes("0123456789");
     out.close();
-    
-    FSDataInputStream in = hftpFs.open(TEST_FILE);
+    FSDataInputStream in = hftpFs.open(testFile);
     in.seek(7);
     assertEquals('7', in.read());
   }

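The three groups in TEST_PATHS above mirror how java.net.URI and the servlet container treat path characters. A minimal standalone sketch (not part of this commit; the class name is made up) that reproduces the behavior the comments describe:

    import java.net.URI;
    import java.net.URISyntaxException;

    public class PathEncodingSketch {
      public static void main(String[] args) throws URISyntaxException {
        // The multi-argument URI constructor percent-encodes characters that
        // are illegal in a path component...
        System.out.println(new URI(null, null, "/foo bar", null).getRawPath()); // /foo%20bar
        System.out.println(new URI(null, null, "/foo?bar", null).getRawPath()); // /foo%3Fbar
        // ...and passes legal path punctuation through verbatim.
        System.out.println(new URI(null, null, "/foo+bar", null).getRawPath()); // /foo+bar
        // ';' is also legal in a path, but servlet containers treat it as a
        // matrix-parameter delimiter, so Request#getPathInfo returns "/foo".
        System.out.println(new URI(null, null, "/foo;bar", null).getRawPath()); // /foo;bar
      }
    }
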
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java Tue Aug 16 00:37:15 2011
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
 import org.junit.Test;
@@ -31,7 +32,8 @@ import org.mockito.Mockito;
 
 public class TestLease {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
-    return cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString()) != null;
+    return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()
+        ).getLeaseByPath(src.toString()) != null;
   }
   
   final Path dir = new Path("/test/lease/");

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Tue Aug 16 00:37:15 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 public class TestLeaseRecovery extends junit.framework.TestCase {
   static final int BLOCK_SIZE = 1024;
@@ -133,7 +134,7 @@ public class TestLeaseRecovery extends j
       DFSTestUtil.waitReplication(dfs, filepath, (short)1);
       waitLeaseRecovery(cluster);
       // verify that we still cannot recover the lease
-      LeaseManager lm = cluster.getNamesystem().leaseManager;
+      LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
       assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
       cluster.getNameNode().setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
     }

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Tue Aug 16 00:37:15 2011
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.ChecksumExce
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 
 /**
  * The test makes sure that NameNode detects the presence of blocks that do not have
@@ -56,6 +57,7 @@ public class TestMissingBlocksAlert exte
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
+      final BlockManager bm = cluster.getNamesystem().getBlockManager();
       DistributedFileSystem dfs = 
                             (DistributedFileSystem) cluster.getFileSystem();
 
@@ -86,8 +88,7 @@ public class TestMissingBlocksAlert exte
       }
       assertTrue(dfs.getMissingBlocksCount() == 1);
       assertEquals(4, dfs.getUnderReplicatedBlocksCount());
-      assertEquals(3, 
-          cluster.getNamesystem().getUnderReplicatedNotMissingBlocks());
+      assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
 
 
       // Now verify that it shows up on webui
@@ -109,8 +110,7 @@ public class TestMissingBlocksAlert exte
       }
 
       assertEquals(2, dfs.getUnderReplicatedBlocksCount());
-      assertEquals(2, 
-          cluster.getNamesystem().getUnderReplicatedNotMissingBlocks());
+      assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
 
       // and make sure WARNING disappears
       // Now verify that it shows up on webui

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java Tue Aug 16 00:37:15 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -48,7 +49,8 @@ import org.junit.Test;
 
 public class TestDelegationToken {
   private MiniDFSCluster cluster;
-  Configuration config;
+  private DelegationTokenSecretManager dtSecretManager;
+  private Configuration config;
   private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
   
   @Before
@@ -61,7 +63,9 @@ public class TestDelegationToken {
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    dtSecretManager = NameNodeAdapter.getDtSecretManager(
+        cluster.getNamesystem());
+    dtSecretManager.startThreads();
   }
 
   @After
@@ -73,8 +77,6 @@ public class TestDelegationToken {
 
   private Token<DelegationTokenIdentifier> generateDelegationToken(
       String owner, String renewer) {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
         owner), new Text(renewer), null);
     return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
@@ -82,8 +84,6 @@ public class TestDelegationToken {
   
   @Test
   public void testDelegationTokenSecretManager() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     Token<DelegationTokenIdentifier> token = generateDelegationToken(
         "SomeUser", "JobTracker");
     // Fake renewer should not be able to renew
@@ -122,8 +122,6 @@ public class TestDelegationToken {
   
   @Test 
   public void testCancelDelegationToken() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
-        .getDelegationTokenSecretManager();
     Token<DelegationTokenIdentifier> token = generateDelegationToken(
         "SomeUser", "JobTracker");
     //Fake renewer should not be able to renew
@@ -144,7 +142,6 @@ public class TestDelegationToken {
   
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
-    DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem().getDelegationTokenSecretManager();
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Tue Aug 16 00:37:15 2011
@@ -31,19 +31,20 @@ import java.util.Enumeration;
 
 import junit.framework.Assert;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.Text;
-import org.apache.commons.logging.*;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -98,7 +99,7 @@ public class TestDelegationTokenForProxy
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
-    cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Tue Aug 16 00:37:15 2011
@@ -25,10 +25,13 @@ import java.util.Set;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 
 public class BlockManagerTestUtil {
+  public static void setNodeReplicationLimit(final BlockManager blockManager,
+      final int limit) {
+    blockManager.maxReplicationStreams = limit;
+  }
 
   /** @return the datanode descriptor for the given the given storageID. */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,

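The new setNodeReplicationLimit accessor works because BlockManagerTestUtil is compiled into the same package as BlockManager, letting it write the package-private maxReplicationStreams field that is no longer reachable through FSNamesystem. A hedged usage sketch (MiniDFSCluster setup assumed; the limit value is arbitrary):

    // Inside a test with a running MiniDFSCluster named "cluster":
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    BlockManagerTestUtil.setNodeReplicationLimit(bm, 8);
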
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java Tue Aug 16 00:37:15 2011
@@ -45,8 +45,8 @@ public class TestComputeInvalidateWork e
       final FSNamesystem namesystem = cluster.getNamesystem();
       final BlockManager bm = namesystem.getBlockManager();
       final int blockInvalidateLimit = bm.getDatanodeManager().blockInvalidateLimit;
-      DatanodeDescriptor[] nodes =
-        namesystem.heartbeats.toArray(new DatanodeDescriptor[NUM_OF_DATANODES]);
+      final DatanodeDescriptor[] nodes = bm.getDatanodeManager(
+          ).getHeartbeatManager().getDatanodes();
       assertEquals(nodes.length, NUM_OF_DATANODES);
       
       namesystem.writeLock();

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java Tue Aug 16 00:37:15 2011
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.io.IOException;
 import java.util.ArrayList;
 
 import junit.framework.TestCase;
@@ -52,6 +51,8 @@ public class TestHeartbeatHandling exten
     try {
       cluster.waitActive();
       final FSNamesystem namesystem = cluster.getNamesystem();
+      final HeartbeatManager hm = namesystem.getBlockManager(
+          ).getDatanodeManager().getHeartbeatManager();
       final String poolId = namesystem.getBlockPoolId();
       final DatanodeRegistration nodeReg = 
         DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
@@ -69,12 +70,12 @@ public class TestHeartbeatHandling exten
 
       try {
         namesystem.writeLock();
-        synchronized (namesystem.heartbeats) {
+        synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
                 new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
           }
-          DatanodeCommand[]cmds = sendHeartBeat(nodeReg, dd, namesystem);
+          DatanodeCommand[]cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
           assertEquals(1, cmds.length);
           assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
           assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
@@ -84,26 +85,26 @@ public class TestHeartbeatHandling exten
             blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
           }
           dd.addBlocksToBeInvalidated(blockList);
-          cmds = sendHeartBeat(nodeReg, dd, namesystem);
+          cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
           assertEquals(2, cmds.length);
           assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
           assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
           assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
           assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
           
-          cmds = sendHeartBeat(nodeReg, dd, namesystem);
+          cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
           assertEquals(2, cmds.length);
           assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
           assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
           assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
           assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
           
-          cmds = sendHeartBeat(nodeReg, dd, namesystem);
+          cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
           assertEquals(1, cmds.length);
           assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
           assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
 
-          cmds = sendHeartBeat(nodeReg, dd, namesystem);
+          cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
           assertEquals(null, cmds);
         }
       } finally {
@@ -113,10 +114,4 @@ public class TestHeartbeatHandling exten
       cluster.shutdown();
     }
   }
-  
-  private static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
-      DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
-    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(), 
-        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
-  }
 }

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Aug 16 00:37:15 2011
@@ -63,7 +63,7 @@ import org.apache.hadoop.util.DiskChecke
  * Note the synchronization is coarse grained - it is at each method. 
  */
 
-public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Configurable{
+public class SimulatedFSDataset  implements FSDatasetInterface, Configurable{
   
   public static final String CONFIG_PROPERTY_SIMULATED =
                                     "dfs.datanode.simulateddatastorage";

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java Tue Aug 16 00:37:15 2011
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -113,10 +113,11 @@ public class TestDataNodeVolumeFailureRe
      * heartbeat their capacities.
      */
     Thread.sleep(WAIT_FOR_HEARTBEATS);
-    FSNamesystem ns = cluster.getNamesystem();
+    final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+        ).getDatanodeManager();
 
-    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
-    long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+    final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
@@ -160,7 +161,7 @@ public class TestDataNodeVolumeFailureRe
     assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
 
     // Eventually the NN should report two volume failures
-    DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2, 
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2, 
         origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
 
     /*
@@ -177,10 +178,10 @@ public class TestDataNodeVolumeFailureRe
 
     ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    ns.DFSNodesStatus(live, dead);
+    dm.fetchDatanodes(live, dead, false);
     live.clear();
     dead.clear();
-    ns.DFSNodesStatus(live, dead);
+    dm.fetchDatanodes(live, dead, false);
     assertEquals("DN3 should have 1 failed volume",
         1, live.get(2).getVolumeFailures());
 
@@ -189,8 +190,8 @@ public class TestDataNodeVolumeFailureRe
      * total capacity should be down by three volumes (assuming the host
      * did not grow or shrink the data volume while the test was running).
      */
-    dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
-    DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 3, 
+    dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 3, 
         origCapacity - (3*dnCapacity), WAIT_FOR_HEARTBEATS);
 
     /*
@@ -212,7 +213,7 @@ public class TestDataNodeVolumeFailureRe
         getMetrics(dns.get(2).getMetrics().name()));
 
     // The NN considers the DN dead
-    DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 2, 
+    DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 2, 
         origCapacity - (4*dnCapacity), WAIT_FOR_HEARTBEATS);
 
     /*
@@ -236,7 +237,7 @@ public class TestDataNodeVolumeFailureRe
      * and that the volume failure count should be reported as zero by
      * both the metrics and the NN.
      */
-    DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 0, origCapacity, 
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0, origCapacity, 
         WAIT_FOR_HEARTBEATS);
   }
 
@@ -251,9 +252,10 @@ public class TestDataNodeVolumeFailureRe
     cluster.startDataNodes(conf, 2, true, null, null);
     cluster.waitActive();
 
-    FSNamesystem ns = cluster.getNamesystem();
-    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
-    long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+    final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+        ).getDatanodeManager();
+    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
     // Fail the first volume on both datanodes (we have to keep the 
     // third healthy so one node in the pipeline will not fail). 
@@ -267,13 +269,13 @@ public class TestDataNodeVolumeFailureRe
     DFSTestUtil.waitReplication(fs, file1, (short)2);
 
     // The NN reports two volumes failures
-    DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2, 
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2, 
         origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
 
     // After restarting the NN it still sees the two failures
     cluster.restartNameNode(0);
     cluster.waitActive();
-    DFSTestUtil.waitForDatanodeStatus(ns, 3, 0, 2,
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
         origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
   }
 }

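DatanodeManager.fetchDatanodes is the replacement for the removed FSNamesystem.DFSNodesStatus call. A sketch of its use as in the hunk above (the third argument is assumed to control whether decommissioned nodes are pruned from the results):

    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    dm.fetchDatanodes(live, dead, false); // false: do not prune decommissioned nodes
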
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Tue Aug 16 00:37:15 2011
@@ -17,29 +17,32 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
 import java.io.File;
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
 
 /**
  * Test the ability of a DN to tolerate volume failures.
@@ -154,9 +157,10 @@ public class TestDataNodeVolumeFailureTo
     conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
     cluster.startDataNodes(conf, 2, true, null, null);
     cluster.waitActive();
-    FSNamesystem ns = cluster.getNamesystem();
-    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(ns);
-    long dnCapacity = DFSTestUtil.getDatanodeCapacity(ns, 0);
+    final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+        ).getDatanodeManager();
+    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+    long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
@@ -168,7 +172,7 @@ public class TestDataNodeVolumeFailureTo
     DFSTestUtil.waitReplication(fs, file1, (short)2);
 
     // Check that this single failure caused a DN to die.
-    DFSTestUtil.waitForDatanodeStatus(ns, 2, 1, 0, 
+    DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0, 
         origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
 
     // If we restore the volume we should still only be able to get
@@ -187,7 +191,7 @@ public class TestDataNodeVolumeFailureTo
    */
   private void restartDatanodes(int volTolerated, boolean manageDfsDirs)
       throws IOException {
-    //Make sure no datanode is running
+    // Make sure no datanode is running
     cluster.shutdownDataNodes();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, volTolerated);
     cluster.startDataNodes(conf, 1, manageDfsDirs, null, null);
@@ -224,7 +228,7 @@ public class TestDataNodeVolumeFailureTo
    */
   private void testVolumeConfig(int volumesTolerated, int volumesFailed,
       boolean expectedBPServiceState, boolean manageDfsDirs)
-      throws IOException, InterruptedException {
+      throws IOException, InterruptedException, TimeoutException {
     assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
     final int dnIndex = 0;
     // Fail the current directory since invalid storage directory perms
@@ -259,4 +263,30 @@ public class TestDataNodeVolumeFailureTo
     assertEquals("Couldn't chmod local vol", 0,
         FileUtil.chmod(dir.toString(), "000"));
   }
+
+  /**
+   * Test that a volume that is considered failed on startup is seen as
+   *  a failed volume by the NN.
+   */
+  @Test
+  public void testFailedVolumeOnStartupIsCounted() throws Exception {
+    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
+    final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+    ).getDatanodeManager();
+    long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
+    File dir = new File(MiniDFSCluster.getStorageDir(0, 0), "current");
+
+    try {
+      prepareDirToFail(dir);
+      restartDatanodes(1, false);
+      // The cluster is up...
+      assertEquals(true, cluster.getDataNodes().get(0)
+          .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
+      // but there has been a single volume failure
+      DFSTestUtil.waitForDatanodeStatus(dm, 1, 0, 1,
+          origCapacity / 2, WAIT_FOR_HEARTBEATS);
+    } finally {
+      FileUtil.chmod(dir.toString(), "755");
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Tue Aug 16 00:37:15 2011
@@ -28,7 +28,6 @@ import java.net.URLEncoder;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.jsp.JspWriter;
 
-import org.apache.commons.httpclient.util.URIUtil;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.ServletUtil;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -71,7 +71,7 @@ public class TestDatanodeJsp {
     
     if (!doTail) {
       assertTrue("page should show link to download file", viewFilePage
-          .contains("/streamFile" + URIUtil.encodePath(testPath.toString()) +
+          .contains("/streamFile" + ServletUtil.encodePath(testPath.toString()) +
               "?nnaddr=localhost:" + nnIpcAddress.getPort()));
     }
   }
@@ -82,15 +82,22 @@ public class TestDatanodeJsp {
     try {
       cluster = new MiniDFSCluster.Builder(CONF).build();
       cluster.waitActive();
-      
-      testViewingFile(cluster, "/test-file", false);
-      testViewingFile(cluster, "/tmp/test-file", false);
-      testViewingFile(cluster, "/tmp/test-file%with goofy&characters", false);
-      
-      testViewingFile(cluster, "/test-file", true);
-      testViewingFile(cluster, "/tmp/test-file", true);
-      testViewingFile(cluster, "/tmp/test-file%with goofy&characters", true);
-      
+      String paths[] = {
+        "/test-file",
+        "/tmp/test-file",
+        "/tmp/test-file%with goofy&characters",
+        "/foo bar/foo bar",
+        "/foo+bar/foo+bar",
+        "/foo;bar/foo;bar",
+        "/foo=bar/foo=bar",
+        "/foo,bar/foo,bar",
+        "/foo?bar/foo?bar",
+        "/foo\">bar/foo\">bar"
+      };
+      for (String p : paths) {
+        testViewingFile(cluster, p, false);
+        testViewingFile(cluster, p, true);
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();

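ServletUtil.encodePath replaces the commons-httpclient URIUtil.encodePath dependency for building the download link checked above. A sketch of the call (only the path portion is escaped; the query string is appended as-is, and the port is hypothetical):

    String link = "/streamFile"
        + ServletUtil.encodePath("/tmp/test-file%with goofy&characters")
        + "?nnaddr=localhost:8020";
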
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Tue Aug 16 00:37:15 2011
@@ -24,25 +24,27 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.mockito.Mockito;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -110,6 +112,40 @@ public abstract class FSImageTestUtil {
     return sd;
   }
   
+  /**
+   * Make a mock storage directory that returns some set of file contents.
+   * @param type type of storage dir
+   * @param previousExists should we mock that the previous/ dir exists?
+   * @param fileNames the names of files contained in current/
+   */
+  static StorageDirectory mockStorageDirectory(
+      StorageDirType type,
+      boolean previousExists,
+      String...  fileNames) {
+    StorageDirectory sd = mock(StorageDirectory.class);
+    
+    doReturn(type).when(sd).getStorageDirType();
+  
+    // Version file should always exist
+    doReturn(mockFile(true)).when(sd).getVersionFile();
+    
+    // Previous dir optionally exists
+    doReturn(mockFile(previousExists))
+      .when(sd).getPreviousDir();   
+  
+    // Return a mock 'current' directory which has the given paths
+    File[] files = new File[fileNames.length];
+    for (int i = 0; i < fileNames.length; i++) {
+      files[i] = new File(fileNames[i]);
+    }
+    
+    File mockDir = Mockito.spy(new File("/dir/current"));
+    doReturn(files).when(mockDir).listFiles();
+    doReturn(mockDir).when(sd).getCurrentDir();
+    
+    return sd;
+  }
+  
   static File mockFile(boolean exists) {
     File mockFile = mock(File.class);
     doReturn(exists).when(mockFile).exists();
@@ -154,9 +190,9 @@ public abstract class FSImageTestUtil {
     for (File dir : dirs) {
       FSImageTransactionalStorageInspector inspector =
         inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
-      FoundFSImage latestImage = inspector.getLatestImage();
+      FSImageFile latestImage = inspector.getLatestImage();
       assertNotNull("No image in " + dir, latestImage);      
-      long thisTxId = latestImage.getTxId();
+      long thisTxId = latestImage.getCheckpointTxId();
       if (imageTxId != -1 && thisTxId != imageTxId) {
         fail("Storage directory " + dir + " does not have the same " +
             "last image index " + imageTxId + " as another");
@@ -283,7 +319,7 @@ public abstract class FSImageTestUtil {
       new FSImageTransactionalStorageInspector();
     inspector.inspectDirectory(sd);
 
-    FoundFSImage latestImage = inspector.getLatestImage();
+    FSImageFile latestImage = inspector.getLatestImage();
     return (latestImage == null) ? null : latestImage.getFile();
   }
 
@@ -316,23 +352,15 @@ public abstract class FSImageTestUtil {
    * @return the latest edits log, finalized or otherwise, from the given
    * storage directory.
    */
-  public static FoundEditLog findLatestEditsLog(StorageDirectory sd)
+  public static EditLogFile findLatestEditsLog(StorageDirectory sd)
   throws IOException {
     FSImageTransactionalStorageInspector inspector =
       new FSImageTransactionalStorageInspector();
     inspector.inspectDirectory(sd);
     
-    List<FoundEditLog> foundEditLogs = Lists.newArrayList(
-        inspector.getFoundEditLogs());
-    return Collections.max(foundEditLogs, new Comparator<FoundEditLog>() {
-      @Override
-      public int compare(FoundEditLog a, FoundEditLog b) {
-        return ComparisonChain.start()
-          .compare(a.getStartTxId(), b.getStartTxId())
-          .compare(a.getLastTxId(), b.getLastTxId())
-          .result();
-      }
-    });
+    List<EditLogFile> foundEditLogs = Lists.newArrayList(
+        inspector.getEditLogFiles());
+    return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
   }
 
   /**
@@ -371,5 +399,16 @@ public abstract class FSImageTestUtil {
     assertNotNull(image);
   }
 
-
+  public static void logStorageContents(Log LOG, NNStorage storage) {
+    LOG.info("current storages and corresponding sizes:");
+    for (StorageDirectory sd : storage.dirIterable(null)) {
+      File curDir = sd.getCurrentDir();
+      LOG.info("In directory " + curDir);
+      File[] files = curDir.listFiles();
+      Arrays.sort(files);
+      for (File f : files) {
+        LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());  
+      }
+    }
+  }
 }

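The new mockStorageDirectory helper builds a Mockito-backed StorageDirectory whose current/ listing is whatever file names the caller supplies, so inspector tests need no real directories on disk. A hedged sketch of its use (the image file name is made up):

    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
        NameNodeDirType.IMAGE,       // storage dir type
        false,                       // no previous/ directory
        "/dir/current/fsimage_123"); // hypothetical image file in current/
    FSImageTransactionalStorageInspector inspector =
        new FSImageTransactionalStorageInspector();
    inspector.inspectDirectory(sd);
    FSImageFile latest = inspector.getLatestImage();
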
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Aug 16 00:37:15 2011
@@ -1128,7 +1128,8 @@ public class NNThroughputBenchmark {
       // decommission data-nodes
       decommissionNodes();
       // set node replication limit
-      namesystem.setNodeReplicationLimit(nodeReplicationLimit);
+      BlockManagerTestUtil.setNodeReplicationLimit(namesystem.getBlockManager(),
+          nodeReplicationLimit);
     }
 
     private void decommissionNodes() throws IOException {
@@ -1171,9 +1172,7 @@ public class NNThroughputBenchmark {
     void printResults() {
       String blockDistribution = "";
       String delim = "(";
-      int totalReplicas = 0;
       for(int idx=0; idx < blockReportObject.getNumDatanodes(); idx++) {
-        totalReplicas += blockReportObject.datanodes[idx].nrBlocks;
         blockDistribution += delim + blockReportObject.datanodes[idx].nrBlocks;
         delim = ", ";
       }

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Aug 16 00:37:15 2011
@@ -19,10 +19,12 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.ipc.Server;
 
 /**
@@ -52,7 +54,33 @@ public class NameNodeAdapter {
   public static Server getRpcServer(NameNode namenode) {
     return namenode.server;
   }
+
+  public static DelegationTokenSecretManager getDtSecretManager(
+      final FSNamesystem ns) {
+    return ns.getDelegationTokenSecretManager();
+  }
+
+  public static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
+      DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
+    return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(), 
+        dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
+  }
+
+  public static boolean setReplication(final FSNamesystem ns,
+      final String src, final short replication) throws IOException {
+    return ns.setReplication(src, replication);
+  }
   
+  public static LeaseManager getLeaseManager(final FSNamesystem ns) {
+    return ns.leaseManager;
+  }
+
+  /** Set the softLimit and hardLimit of client lease periods. */
+  public static void setLeasePeriod(final FSNamesystem namesystem, long soft, long hard) {
+    getLeaseManager(namesystem).setLeasePeriod(soft, hard);
+    namesystem.lmthread.interrupt();
+  }
+
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
     return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
   }

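NameNodeAdapter now centralizes the package-private FSNamesystem hooks that the tests in this commit previously reached into directly. A hedged sketch of the call sites after this change (cluster, nodeReg, and dd are assumed to come from the usual MiniDFSCluster test scaffolding):

    FSNamesystem ns = cluster.getNamesystem();
    // lease state (TestLease, TestLeaseRecovery)
    LeaseManager lm = NameNodeAdapter.getLeaseManager(ns);
    // delegation tokens (TestDelegationToken, TestDelegationTokenForProxyUser)
    NameNodeAdapter.getDtSecretManager(ns).startThreads();
    // heartbeat handling (TestHeartbeatHandling)
    DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, ns);
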
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Tue Aug 16 00:37:15 2011
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -163,8 +163,8 @@ public class TestBackupNode extends Test
       
       // When shutting down the BN, it shouldn't finalize logs that are
       // still open on the NN
-      FoundEditLog editsLog = FSImageTestUtil.findLatestEditsLog(sd);
-      assertEquals(editsLog.getStartTxId(),
+      EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
+      assertEquals(editsLog.getFirstTxId(),
           nn.getFSImage().getEditLog().getCurSegmentTxId());
       assertTrue("Should not have finalized " + editsLog,
           editsLog.isInProgress());

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Tue Aug 16 00:37:15 2011
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -82,7 +82,7 @@ public class TestCheckPointForSecurityTo
       // verify that the edits file is NOT empty
       NameNode nn = cluster.getNameNode();
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
-        FoundEditLog log = FSImageTestUtil.findLatestEditsLog(sd);
+        EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
         assertEquals("In-progress log " + log + " should have 5 transactions",
             5, log.validateLog().numTransactions);
@@ -97,7 +97,7 @@ public class TestCheckPointForSecurityTo
       }
       // verify that the edits file is empty except for the START txn
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
-        FoundEditLog log = FSImageTestUtil.findLatestEditsLog(sd);
+        EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
         assertEquals("In-progress log " + log + " should only have START txn",
             1, log.validateLog().numTransactions);

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Tue Aug 16 00:37:15 2011
@@ -1450,7 +1450,7 @@ public class TestCheckpoint extends Test
 
       // Make a finalized log on the server side. 
       nn.rollEditLog();
-      RemoteEditLogManifest manifest = nn.getEditLogManifest(0);
+      RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
       RemoteEditLog log = manifest.getLogs().get(0);
       
       NNStorage dstImage = Mockito.mock(NNStorage.class);

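The manifest is now requested by the first transaction id of interest, and txids in the transactional layout start at 1, hence the change from getEditLogManifest(0) to getEditLogManifest(1). A sketch of the call, assuming per the mock tests below that the manifest lists finalized segments from the given txid onward (skipping past any gap):

    nn.rollEditLog();                                          // finalize the current segment
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1); // segments from txid 1 onward
    RemoteEditLog log = manifest.getLogs().get(0);             // renders as e.g. [1,100]
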
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Tue Aug 16 00:37:15 2011
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -146,7 +148,7 @@ public class TestDecommissioningStatus {
   /*
    * Decommissions the node at the given index
    */
-  private String decommissionNode(FSNamesystem namesystem, Configuration conf,
+  private String decommissionNode(FSNamesystem namesystem,
       DFSClient client, FileSystem localFileSys, int nodeIndex)
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
@@ -158,7 +160,6 @@ public class TestDecommissioningStatus {
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namesystem.refreshNodes(conf);
     return nodename;
   }
 
@@ -199,13 +200,13 @@ public class TestDecommissioningStatus {
     Thread.sleep(5000);
 
     FSNamesystem fsn = cluster.getNamesystem();
+    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
     for (int iteration = 0; iteration < numDatanodes; iteration++) {
-      String downnode = decommissionNode(fsn, conf, client, localFileSys,
-          iteration);
+      String downnode = decommissionNode(fsn, client, localFileSys, iteration);
+      dm.refreshNodes(conf);
       decommissionedNodes.add(downnode);
       Thread.sleep(5000);
-      ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
-          .getDecommissioningNodes();
+      final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
       if (iteration == 0) {
         assertEquals(decommissioningNodes.size(), 1);
         DatanodeDescriptor decommNode = decommissioningNodes.get(0);
@@ -222,7 +223,7 @@ public class TestDecommissioningStatus {
     // This will remove the datanodes from decommissioning list and
     // make them available again.
     writeConfigFile(localFileSys, excludeFile, null);
-    fsn.refreshNodes(conf);
+    dm.refreshNodes(conf);
     st1.close();
     cleanupFile(fileSys, file1);
     cleanupFile(fileSys, file2);

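Node refresh and decommissioning-status queries have moved from FSNamesystem to DatanodeManager. A sketch of the new call pattern, reusing the same helpers as the test above:

    FSNamesystem fsn = cluster.getNamesystem();
    DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();

    writeConfigFile(localFileSys, excludeFile, nodes); // update the exclude list first
    dm.refreshNodes(conf);                             // was: fsn.refreshNodes(conf)
    List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
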
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Aug 16 00:37:15 2011
@@ -22,9 +22,15 @@ import java.io.*;
 import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Arrays;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -49,6 +55,10 @@ import org.apache.log4j.Level;
 import org.aspectj.util.FileUtil;
 
 import org.mockito.Mockito;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 
 import static org.apache.hadoop.test.MetricsAsserts.*;
 
@@ -676,28 +686,44 @@ public class TestEditLog extends TestCas
   private static class EditLogByteInputStream extends EditLogInputStream {
     private InputStream input;
     private long len;
+    private int version;
+    private FSEditLogOp.Reader reader = null;
+    private FSEditLogLoader.PositionTrackingInputStream tracker = null;
 
-    public EditLogByteInputStream(byte[] data) {
+    public EditLogByteInputStream(byte[] data) throws IOException {
       len = data.length;
       input = new ByteArrayInputStream(data);
-    }
 
-    public int available() throws IOException {
-      return input.available();
-    }
-    
-    public int read() throws IOException {
-      return input.read();
+      BufferedInputStream bin = new BufferedInputStream(input);
+      DataInputStream in = new DataInputStream(bin);
+      version = EditLogFileInputStream.readLogVersion(in);
+      tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
+      in = new DataInputStream(tracker);
+            
+      reader = new FSEditLogOp.Reader(in, version);
     }
     
+    @Override
     public long length() throws IOException {
       return len;
     }
-    
-    public int read(byte[] b, int off, int len) throws IOException {
-      return input.read(b, off, len);
+  
+    @Override
+    public long getPosition() {
+      return tracker.getPos();
+    }
+
+    @Override
+    public FSEditLogOp readOp() throws IOException {
+      return reader.readOp();
+    }
+
+    @Override
+    public int getVersion() throws IOException {
+      return version;
     }
 
+    @Override
     public void close() throws IOException {
       input.close();
     }
@@ -729,4 +755,103 @@ public class TestEditLog extends TestCas
       log.close();
     }
   }
+
+  /**
+   * Tests the getEditLogManifest function using mock storage for a number
+   * of different situations.
+   */
+  @Test
+  public void testEditLogManifestMocks() throws IOException {
+    NNStorage storage;
+    FSEditLog log;
+    // Simple case - different directories have the same
+    // set of logs, with an in-progress one at end
+    storage = mockStorageWithEdits(
+        "[1,100]|[101,200]|[201,]",
+        "[1,100]|[101,200]|[201,]");
+    log = new FSEditLog(storage);
+    assertEquals("[[1,100], [101,200]]",
+        log.getEditLogManifest(1).toString());
+    assertEquals("[[101,200]]",
+        log.getEditLogManifest(101).toString());
+
+    // Another simple case, different directories have different
+    // sets of files
+    storage = mockStorageWithEdits(
+        "[1,100]|[101,200]",
+        "[1,100]|[201,300]|[301,400]"); // nothing starting at 101
+    log = new FSEditLog(storage);
+    assertEquals("[[1,100], [101,200], [201,300], [301,400]]",
+        log.getEditLogManifest(1).toString());
+    
+    // Case where one directory has an earlier finalized log, followed
+    // by a gap. The returned manifest should start after the gap.
+    storage = mockStorageWithEdits(
+        "[1,100]|[301,400]", // gap from 101 to 300
+        "[301,400]|[401,500]");
+    log = new FSEditLog(storage);
+    assertEquals("[[301,400], [401,500]]",
+        log.getEditLogManifest(1).toString());
+    
+    // Case where different directories have logs of different lengths
+    // starting at the same txid - should pick the longer one
+    storage = mockStorageWithEdits(
+        "[1,100]|[101,150]", // short log at 101
+        "[1,50]|[101,200]"); // short log at 1
+    log = new FSEditLog(storage);
+    assertEquals("[[1,100], [101,200]]",
+        log.getEditLogManifest(1).toString());
+    assertEquals("[[101,200]]",
+        log.getEditLogManifest(101).toString());
+
+    // Case where the first storage dir has an in-progress segment while
+    // the second has finalized that same segment (i.e. the first dir
+    // failed recently)
+    storage = mockStorageWithEdits(
+        "[1,100]|[101,]", 
+        "[1,100]|[101,200]"); 
+    log = new FSEditLog(storage);
+    assertEquals("[[1,100], [101,200]]",
+        log.getEditLogManifest(1).toString());
+    assertEquals("[[101,200]]",
+        log.getEditLogManifest(101).toString());
+  }
+  
+  /**
+   * Create a mock NNStorage object with several directories, each directory
+   * holding edit logs according to a specification. Each directory
+   * is specified by a pipe-separated string. For example:
+   * <code>[1,100]|[101,200]</code> specifies a directory which
+   * includes two finalized segments, one from 1-100, and one from 101-200.
+   * The syntax <code>[1,]</code> specifies an in-progress log starting at
+   * txid 1.
+   */
+  private NNStorage mockStorageWithEdits(String... editsDirSpecs) {
+    List<StorageDirectory> sds = Lists.newArrayList();
+    for (String dirSpec : editsDirSpecs) {
+      List<String> files = Lists.newArrayList();
+      String[] logSpecs = dirSpec.split("\\|");
+      for (String logSpec : logSpecs) {
+        Matcher m = Pattern.compile("\\[(\\d+),(\\d+)?\\]").matcher(logSpec);
+        assertTrue("bad spec: " + logSpec, m.matches());
+        if (m.group(2) == null) {
+          files.add(NNStorage.getInProgressEditsFileName(
+              Long.valueOf(m.group(1))));
+        } else {
+          files.add(NNStorage.getFinalizedEditsFileName(
+              Long.valueOf(m.group(1)),
+              Long.valueOf(m.group(2))));
+        }
+      }
+      sds.add(FSImageTestUtil.mockStorageDirectory(
+          NameNodeDirType.EDITS, false,
+          files.toArray(new String[0])));
+    }
+    
+    NNStorage storage = Mockito.mock(NNStorage.class);
+    Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
+    return storage;
+  }
+  
+  
 }

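EditLogInputStream is now an op-level reader (readOp/getPosition/getVersion) rather than a raw byte stream, as the rewritten EditLogByteInputStream above shows. A sketch of draining a stream through the new interface, assuming readOp() returns null at end-of-log:

    EditLogInputStream in = new EditLogByteInputStream(data);
    System.out.println("log version: " + in.getVersion());
    FSEditLogOp op;
    while ((op = in.readOp()) != null) {
      // getPosition() reports the byte offset reached by the position tracker
      System.out.println(in.getPosition() + ": " + op);
    }
    in.close();
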
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Tue Aug 16 00:37:15 2011
@@ -61,7 +61,7 @@ public class TestEditLogFileOutputStream
       .getStorage().getStorageDir(0);
     File editLog = NNStorage.getInProgressEditsFile(sd, 1);
 
-    EditLogValidation validation = FSEditLogLoader.validateEditLog(editLog);
+    EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog);
     assertEquals("Edit log should contain a header as valid length",
         HEADER_LEN, validation.validLength);
     assertEquals(1, validation.numTransactions);
@@ -73,7 +73,7 @@ public class TestEditLogFileOutputStream
         new FsPermission((short)777));
 
     long oldLength = validation.validLength;
-    validation = FSEditLogLoader.validateEditLog(editLog);
+    validation = EditLogFileInputStream.validateEditLog(editLog);
     assertTrue("Edit log should have more valid data after writing a txn " +
         "(was: " + oldLength + " now: " + validation.validLength + ")",
         validation.validLength > oldLength);

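validateEditLog() has moved from FSEditLogLoader to EditLogFileInputStream (TestFSEditLogLoader below picks up the same change). A sketch of the relocated call; EditLogValidation exposes the validLength and numTransactions used throughout these tests:

    File editLog = NNStorage.getInProgressEditsFile(sd, 1);
    EditLogValidation validation = EditLogFileInputStream.validateEditLog(editLog);
    System.out.println("valid length: " + validation.validLength
        + ", transactions: " + validation.numTransactions);
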
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Tue Aug 16 00:37:15 2011
@@ -186,7 +186,7 @@ public class TestFSEditLogLoader {
 
     // Make sure that uncorrupted log has the expected length and number
     // of transactions.
-    EditLogValidation validation = FSEditLogLoader.validateEditLog(logFile);
+    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
     assertEquals(NUM_TXNS + 2, validation.numTransactions);
     assertEquals(validLength, validation.validLength);
     
@@ -202,7 +202,7 @@ public class TestFSEditLogLoader {
       // Restore backup, truncate the file exactly before the txn
       Files.copy(logFileBak, logFile);
       truncateFile(logFile, txOffset);
-      validation = FSEditLogLoader.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when truncating to length " + txOffset,
           txid - 1, validation.numTransactions);
       assertEquals(txOffset, validation.validLength);
@@ -211,7 +211,7 @@ public class TestFSEditLogLoader {
       // also isn't valid
       Files.copy(logFileBak, logFile);
       truncateFile(logFile, txOffset + 1);
-      validation = FSEditLogLoader.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when truncating to length " + (txOffset + 1),
           txid - 1, validation.numTransactions);
       assertEquals(txOffset, validation.validLength);
@@ -219,7 +219,7 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset);
-      validation = FSEditLogLoader.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when corrupting txn opcode at " + txOffset,
           txid - 1, validation.numTransactions);
       assertEquals(txOffset, validation.validLength);
@@ -227,7 +227,7 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt a byte a few bytes into the txn
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset+5);
-      validation = FSEditLogLoader.validateEditLog(logFile);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
       assertEquals("Failed when corrupting txn data at " + (txOffset+5),
           txid - 1, validation.numTransactions);
       assertEquals(txOffset, validation.validLength);
@@ -240,7 +240,7 @@ public class TestFSEditLogLoader {
     for (long offset = 0; offset < validLength; offset++) {
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, offset);
-      EditLogValidation val = FSEditLogLoader.validateEditLog(logFile);
+      EditLogValidation val = EditLogFileInputStream.validateEditLog(logFile);
       assertTrue(val.numTransactions >= prevNumValid);
       prevNumValid = val.numTransactions;
     }

Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Tue Aug 16 00:37:15 2011
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.*;
 
 import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 
 import java.io.File;
@@ -29,15 +28,14 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.TransactionalLoadPlan;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.LogGroup;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.LoadPlan;
@@ -56,7 +54,7 @@ public class TestFSImageStorageInspector
     FSImageTransactionalStorageInspector inspector = 
         new FSImageTransactionalStorageInspector();
     
-    StorageDirectory mockDir = mockDirectory(
+    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE_AND_EDITS,
         false,
         "/foo/current/" + getImageFileName(123),
@@ -72,7 +70,7 @@ public class TestFSImageStorageInspector
     assertEquals(2, inspector.foundImages.size());
     assertTrue(inspector.foundEditLogs.get(1).isInProgress());
     
-    FoundFSImage latestImage = inspector.getLatestImage();
+    FSImageFile latestImage = inspector.getLatestImage();
     assertEquals(456, latestImage.txId);
     assertSame(mockDir, latestImage.sd);
     assertTrue(inspector.isUpgradeFinalized());
@@ -95,7 +93,7 @@ public class TestFSImageStorageInspector
     FSImageTransactionalStorageInspector inspector =
         new FSImageTransactionalStorageInspector();
     
-    StorageDirectory mockDir = mockDirectory(
+    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE_AND_EDITS,
         false,
         "/foo/current/" + getImageFileName(123),
@@ -123,7 +121,7 @@ public class TestFSImageStorageInspector
     FSImageTransactionalStorageInspector inspector =
         new FSImageTransactionalStorageInspector();
     
-    StorageDirectory mockDir = mockDirectory(
+    StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE_AND_EDITS,
         false,
         "/foo/current/" + getImageFileName(123),
@@ -196,14 +194,14 @@ public class TestFSImageStorageInspector
     inspector.inspectDirectory(
         mockDirectoryWithEditLogs("/foo3/current/"
                                   + getInProgressEditsFileName(123)));
-    inspector.inspectDirectory(mockDirectory(
+    inspector.inspectDirectory(FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE,
         false,
         "/foo4/current/" + getImageFileName(122)));
 
     LogGroup lg = inspector.logGroups.get(123L);
     assertEquals(3, lg.logs.size());
-    FoundEditLog inProgressLog = lg.logs.get(2);
+    EditLogFile inProgressLog = lg.logs.get(2);
     assertTrue(inProgressLog.isInProgress());
     
     LoadPlan plan = inspector.createLoadPlan();
@@ -282,7 +280,7 @@ public class TestFSImageStorageInspector
     assertTrue(lg.logs.get(2).isCorrupt());
     
     // Calling recover should move it aside
-    FoundEditLog badLog = lg.logs.get(2);
+    EditLogFile badLog = lg.logs.get(2);
     Mockito.doNothing().when(badLog).moveAsideCorruptFile();
     Mockito.doNothing().when(lg.logs.get(0)).finalizeLog();
     Mockito.doNothing().when(lg.logs.get(1)).finalizeLog();
@@ -303,12 +301,12 @@ public class TestFSImageStorageInspector
       String path, int numValidTransactions) throws IOException {
     
     for (LogGroup lg : inspector.logGroups.values()) {
-      List<FoundEditLog> logs = lg.logs;
+      List<EditLogFile> logs = lg.logs;
       for (int i = 0; i < logs.size(); i++) {
-        FoundEditLog log = logs.get(i);
-        if (log.file.getPath().equals(path)) {
+        EditLogFile log = logs.get(i);
+        if (log.getFile().getPath().equals(path)) {
           // mock out its validation
-          FoundEditLog spyLog = spy(log);
+          EditLogFile spyLog = spy(log);
           doReturn(new FSEditLogLoader.EditLogValidation(-1, numValidTransactions))
             .when(spyLog).validateLog();
           logs.set(i, spyLog);
@@ -327,15 +325,15 @@ public class TestFSImageStorageInspector
     FSImageTransactionalStorageInspector inspector =
         new FSImageTransactionalStorageInspector();
     
-    StorageDirectory mockImageDir = mockDirectory(
+    StorageDirectory mockImageDir = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE,
         false,
         "/foo/current/" + getImageFileName(123));
-    StorageDirectory mockImageDir2 = mockDirectory(
+    StorageDirectory mockImageDir2 = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.IMAGE,
         false,
         "/foo2/current/" + getImageFileName(456));
-    StorageDirectory mockEditsDir = mockDirectory(
+    StorageDirectory mockEditsDir = FSImageTestUtil.mockStorageDirectory(
         NameNodeDirType.EDITS,
         false,
         "/foo3/current/" + getFinalizedEditsFileName(123, 456),
@@ -356,7 +354,7 @@ public class TestFSImageStorageInspector
     // Check plan
     TransactionalLoadPlan plan =
       (TransactionalLoadPlan)inspector.createLoadPlan();
-    FoundFSImage pickedImage = plan.image;
+    FSImageFile pickedImage = plan.image;
     assertEquals(456, pickedImage.txId);
     assertSame(mockImageDir2, pickedImage.sd);
     assertEquals(new File("/foo2/current/" + getImageFileName(456)),
@@ -364,43 +362,8 @@ public class TestFSImageStorageInspector
     assertArrayEquals(new File[] {
         new File("/foo3/current/" + getInProgressEditsFileName(457))
       }, plan.getEditsFiles().toArray(new File[0]));
-
-    // Check log manifest
-    assertEquals("[[123,456]]", inspector.getEditLogManifest(123).toString());
-    assertEquals("[[123,456]]", inspector.getEditLogManifest(456).toString());
-    assertEquals("[]", inspector.getEditLogManifest(457).toString());
   }
   
-  @Test
-  public void testLogManifest() throws IOException { 
-    FSImageTransactionalStorageInspector inspector =
-        new FSImageTransactionalStorageInspector();
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo1/current/" 
-                                  + getFinalizedEditsFileName(1,1),
-                                  "/foo1/current/"
-                                  + getFinalizedEditsFileName(2,200)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo2/current/" 
-                                  + getInProgressEditsFileName(1),
-                                  "/foo2/current/"
-                                  + getFinalizedEditsFileName(201, 400)));
-    inspector.inspectDirectory(
-        mockDirectoryWithEditLogs("/foo3/current/"
-                                  + getFinalizedEditsFileName(1, 1),
-                                  "/foo3/current/"
-                                  + getFinalizedEditsFileName(2,200)));
-    
-    assertEquals("[[1,1], [2,200], [201,400]]",
-                 inspector.getEditLogManifest(1).toString());
-    assertEquals("[[2,200], [201,400]]",
-                 inspector.getEditLogManifest(2).toString());
-    assertEquals("[[2,200], [201,400]]",
-                 inspector.getEditLogManifest(10).toString());
-    assertEquals("[[201,400]]",
-                 inspector.getEditLogManifest(201).toString());
-  }  
-
   /**
    * Test case where an in-progress log is in an earlier name directory
    * than a finalized log. Previously, getEditLogManifest wouldn't
@@ -426,46 +389,9 @@ public class TestFSImageStorageInspector
                                   + getFinalizedEditsFileName(2626,2627),
                                   "/foo2/current/"
                                   + getFinalizedEditsFileName(2628,2629)));
-    
-    assertEquals("[[2622,2623], [2624,2625], [2626,2627], [2628,2629]]",
-                 inspector.getEditLogManifest(2621).toString());
   }  
   
-  private StorageDirectory mockDirectoryWithEditLogs(String... fileNames) {
-    return mockDirectory(NameNodeDirType.EDITS, false, fileNames);
-  }
-  
-  /**
-   * Make a mock storage directory that returns some set of file contents.
-   * @param type type of storage dir
-   * @param previousExists should we mock that the previous/ dir exists?
-   * @param fileNames the names of files contained in current/
-   */
-  static StorageDirectory mockDirectory(
-      StorageDirType type,
-      boolean previousExists,
-      String...  fileNames) {
-    StorageDirectory sd = mock(StorageDirectory.class);
-    
-    doReturn(type).when(sd).getStorageDirType();
-
-    // Version file should always exist
-    doReturn(FSImageTestUtil.mockFile(true)).when(sd).getVersionFile();
-    
-    // Previous dir optionally exists
-    doReturn(FSImageTestUtil.mockFile(previousExists))
-      .when(sd).getPreviousDir();   
-
-    // Return a mock 'current' directory which has the given paths
-    File[] files = new File[fileNames.length];
-    for (int i = 0; i < fileNames.length; i++) {
-      files[i] = new File(fileNames[i]);
-    }
-    
-    File mockDir = Mockito.spy(new File("/dir/current"));
-    doReturn(files).when(mockDir).listFiles();
-    doReturn(mockDir).when(sd).getCurrentDir();
-    
-    return sd;
+  static StorageDirectory mockDirectoryWithEditLogs(String... fileNames) {
+    return FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, fileNames);
   }
 }

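The private mockDirectory() helper has been hoisted into FSImageTestUtil.mockStorageDirectory() so other tests can share it. A sketch of building a mock edits directory with one finalized and one in-progress segment, using the static file-name helpers this test already imports:

    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
        NameNodeDirType.EDITS,
        false,                                             // no previous/ directory
        "/foo/current/" + getFinalizedEditsFileName(1, 100),
        "/foo/current/" + getInProgressEditsFileName(101));
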
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java Tue Aug 16 00:37:15 2011
@@ -24,8 +24,8 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
@@ -168,14 +168,14 @@ public class TestNNStorageRetentionManag
 
     StoragePurger mockPurger =
       Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
-    ArgumentCaptor<FoundFSImage> imagesPurgedCaptor =
-      ArgumentCaptor.forClass(FoundFSImage.class);    
-    ArgumentCaptor<FoundEditLog> logsPurgedCaptor =
-      ArgumentCaptor.forClass(FoundEditLog.class);    
+    ArgumentCaptor<FSImageFile> imagesPurgedCaptor =
+      ArgumentCaptor.forClass(FSImageFile.class);    
+    ArgumentCaptor<EditLogFile> logsPurgedCaptor =
+      ArgumentCaptor.forClass(EditLogFile.class);    
 
     // Ask the manager to purge files we don't need any more
     new NNStorageRetentionManager(conf,
-        tc.mockStorage(), tc.mockEditLog(), mockPurger)
+        tc.mockStorage(), tc.mockEditLog(mockPurger), mockPurger)
       .purgeOldStorage();
     
     // Verify that it asked the purger to remove the correct files
@@ -186,7 +186,7 @@ public class TestNNStorageRetentionManag
 
     // Check images
     Set<String> purgedPaths = Sets.newHashSet();
-    for (FoundFSImage purged : imagesPurgedCaptor.getAllValues()) {
+    for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
       purgedPaths.add(purged.getFile().toString());
     }    
     Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedImages),
@@ -194,7 +194,7 @@ public class TestNNStorageRetentionManag
 
     // Check images
     purgedPaths.clear();
-    for (FoundEditLog purged : logsPurgedCaptor.getAllValues()) {
+    for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
       purgedPaths.add(purged.getFile().toString());
     }    
     Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedLogs),
@@ -216,7 +216,7 @@ public class TestNNStorageRetentionManag
       }
 
       StorageDirectory mockStorageDir() {
-        return TestFSImageStorageInspector.mockDirectory(
+        return FSImageTestUtil.mockStorageDirectory(
             type, false,
             files.toArray(new String[0]));
       }
@@ -256,13 +256,14 @@ public class TestNNStorageRetentionManag
       return mockStorageForDirs(sds.toArray(new StorageDirectory[0]));
     }
     
-    public FSEditLog mockEditLog() {
+    public FSEditLog mockEditLog(StoragePurger purger) {
       final List<JournalManager> jms = Lists.newArrayList();
       for (FakeRoot root : dirRoots.values()) {
         if (!root.type.isOfType(NameNodeDirType.EDITS)) continue;
         
         FileJournalManager fjm = new FileJournalManager(
             root.mockStorageDir());
+        fjm.purger = purger;
         jms.add(fjm);
       }
 
@@ -272,17 +273,15 @@ public class TestNNStorageRetentionManag
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
           Object[] args = invocation.getArguments();
-          assert args.length == 2;
+          assert args.length == 1;
           long txId = (Long) args[0];
-          StoragePurger purger = (StoragePurger) args[1];
           
           for (JournalManager jm : jms) {
-            jm.purgeLogsOlderThan(txId, purger);
+            jm.purgeLogsOlderThan(txId);
           }
           return null;
         }
-      }).when(mockLog).purgeLogsOlderThan(
-          Mockito.anyLong(), (StoragePurger) Mockito.anyObject());
+      }).when(mockLog).purgeLogsOlderThan(Mockito.anyLong());
       return mockLog;
     }
   }

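FSEditLog.purgeLogsOlderThan() now takes only the minimum txid to retain; the StoragePurger is injected into each FileJournalManager up front rather than passed on every call. A sketch of the new wiring, with mockPurger standing in for a real purger as in the test above:

    FileJournalManager fjm = new FileJournalManager(sd);
    fjm.purger = mockPurger;        // purger is injected once...
    fjm.purgeLogsOlderThan(100);    // ...so the purge call carries only the txid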

