hadoop-hdfs-commits mailing list archives

From a..@apache.org
Subject svn commit: r1516230 [2/2] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjournal/ hadoop-hdfs/src/main/java/ hadoop-hdf...
Date Wed, 21 Aug 2013 17:47:17 GMT
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed Aug 21 17:47:10 2013
@@ -256,19 +256,15 @@ public class SecondaryNameNode implement
 
     // initialize the webserver for uploading files.
     int tmpInfoPort = infoSocAddr.getPort();
-    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
-                                tmpInfoPort == 0, conf,
-                                new AccessControlList(conf.get(DFS_ADMIN, " "))) {
-      {
-        if (UserGroupInformation.isSecurityEnabled()) {
-          initSpnego(
-              conf,
-              DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
-              DFSUtil.getSpnegoKeytabKey(conf,
-                  DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
-        }
-      }
-    };
+    infoServer = new HttpServer.Builder().setName("secondary")
+        .setBindAddress(infoBindAddress).setPort(tmpInfoPort)
+        .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
+            new AccessControlList(conf.get(DFS_ADMIN, " ")))
+        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+        .setUsernameConfKey(
+            DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+        .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+            DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
     infoServer.setAttribute("secondary.name.node", this);
     infoServer.setAttribute("name.system.image", checkpointImage);
     infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
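
The hunk above is representative of a commit-wide change: HttpServer instances are now constructed through HttpServer.Builder instead of the old multi-argument constructor with its instance-initializer SPNEGO block (TestEditLogFileInputStream below gets the same treatment). Note that the builder takes the security flag and the SPNEGO config keys unconditionally; the conditional initSpnego() call is gone. A minimal sketch of the new pattern, with setter names taken from this diff; the config key strings are assumptions (the literal "dfs.cluster.administrators" stands in for the DFS_ADMIN constant, and the SPNEGO keys are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class HttpServerBuilderSketch {
      static HttpServer build(Configuration conf) throws Exception {
        return new HttpServer.Builder()
            .setName("secondary")
            .setBindAddress("0.0.0.0")
            .setPort(0)
            .setFindPort(true)  // port == 0 means "pick a free port"
            .setConf(conf)
            // assumed value of DFS_ADMIN, as used in the diff above
            .setACL(new AccessControlList(conf.get("dfs.cluster.administrators", " ")))
            .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
            .setUsernameConfKey("some.spnego.principal.key")  // placeholder
            .setKeytabConfKey("some.spnego.keytab.key")       // placeholder
            .build();
      }
    }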

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Wed Aug 21 17:47:10 2013
@@ -226,7 +226,7 @@ public class BootstrapStandby implements
     try {
       Collection<EditLogInputStream> streams =
         image.getEditLog().selectInputStreams(
-          firstTxIdInLogs, curTxIdOnOtherNode, null, true);
+          firstTxIdInLogs, curTxIdOnOtherNode, null, true, false);
       for (EditLogInputStream stream : streams) {
         IOUtils.closeStream(stream);
       }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Wed Aug 21 17:47:10 2013
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.util.Time.now;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -38,10 +46,10 @@ import org.apache.hadoop.hdfs.util.Cance
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.util.Time.now;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -57,6 +65,7 @@ public class StandbyCheckpointer {
   private final FSNamesystem namesystem;
   private long lastCheckpointTime;
   private final CheckpointerThread thread;
+  private final ThreadFactory uploadThreadFactory;
   private String activeNNAddress;
   private InetSocketAddress myNNAddress;
 
@@ -72,6 +81,8 @@ public class StandbyCheckpointer {
     this.namesystem = ns;
     this.checkpointConf = new CheckpointConf(conf); 
     this.thread = new CheckpointerThread();
+    this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("TransferFsImageUpload-%d").build();
 
     setNameNodeAddresses(conf);
   }
@@ -142,7 +153,7 @@ public class StandbyCheckpointer {
 
   private void doCheckpoint() throws InterruptedException, IOException {
     assert canceler != null;
-    long txid;
+    final long txid;
     
     namesystem.writeLockInterruptibly();
     try {
@@ -171,9 +182,26 @@ public class StandbyCheckpointer {
     }
     
     // Upload the saved checkpoint back to the active
-    TransferFsImage.uploadImageFromStorage(
-        activeNNAddress, myNNAddress,
-        namesystem.getFSImage().getStorage(), txid);
+    // Do this in a separate thread to avoid blocking transition to active
+    // See HDFS-4816
+    ExecutorService executor =
+        Executors.newSingleThreadExecutor(uploadThreadFactory);
+    Future<Void> upload = executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        TransferFsImage.uploadImageFromStorage(
+            activeNNAddress, myNNAddress,
+            namesystem.getFSImage().getStorage(), txid);
+        return null;
+      }
+    });
+    executor.shutdown();
+    try {
+      upload.get();
+    } catch (ExecutionException e) {
+      throw new IOException("Exception during image upload: " + e.getMessage(),
+          e.getCause());
+    }
   }
   
   /**
@@ -301,6 +329,7 @@ public class StandbyCheckpointer {
           LOG.info("Checkpoint was cancelled: " + ce.getMessage());
           canceledCount++;
         } catch (InterruptedException ie) {
+          LOG.info("Interrupted during checkpointing", ie);
           // Probably requested shutdown.
           continue;
         } catch (Throwable t) {
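
The doCheckpoint() change is the substance of this file's diff (HDFS-4816): the image upload to the active NN now runs on a freshly created single-thread executor, and the checkpointer merely waits on the Future. The apparent intent, per the comment above, is that because Future.get() is interruptible, a standby asked to become active can interrupt the checkpointer thread and proceed with the transition while the upload finishes (or fails) on its own daemon thread. Note that executor.shutdown() does not cancel the already-submitted task; it only prevents new submissions and lets the executor's thread exit afterwards. A self-contained illustration of the pattern, not HDFS code:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class InterruptibleWaitDemo {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Void> upload = executor.submit(new Callable<Void>() {
          public Void call() throws Exception {
            Thread.sleep(2000);  // stand-in for a slow image upload
            return null;
          }
        });
        executor.shutdown();     // no new tasks; the submitted one still runs

        final Thread waiter = Thread.currentThread();
        new Thread(new Runnable() {
          public void run() {
            waiter.interrupt();  // simulates the request to become active
          }
        }).start();

        try {
          upload.get();          // interruptible wait
        } catch (InterruptedException ie) {
          System.out.println("wait abandoned; the task keeps running");
        } catch (ExecutionException ee) {
          throw new RuntimeException(ee.getCause());
        }
      }
    }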

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Wed Aug 21 17:47:10 2013
@@ -31,6 +31,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.hdfs;
 
+import "HAServiceProtocol.proto";
 import "hdfs.proto";
 
 /**
@@ -185,11 +186,7 @@ message StorageReportProto {
  * txid - Highest transaction ID this NN has seen
  */
 message NNHAStatusHeartbeatProto {
-  enum State {
-    ACTIVE = 0;
-    STANDBY = 1;
-  }
-  required State state = 1; 
+  required hadoop.common.HAServiceStateProto state = 1;
   required uint64 txid = 2;
 }
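
This replaces the message-local ACTIVE/STANDBY enum with the shared hadoop.common.HAServiceStateProto, which is why HAServiceProtocol.proto is now imported at the top of the file. One caveat worth noting: protobuf enum fields are encoded by numeric value, so if the shared enum numbers its states differently than the old local enum (ACTIVE = 0, STANDBY = 1), the wire encoding of this field changes. On an unreleased feature branch like HDFS-2832 that is presumably acceptable.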
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Wed Aug 21 17:47:10 2013
@@ -177,6 +177,7 @@ message GetEditLogManifestRequestProto {
   required uint64 sinceTxId = 2;  // Transaction ID
   // Whether or not the client will be reading from the returned streams.
   optional bool forReading = 3 [default = true];
+  optional bool inProgressOk = 4 [default = false];
 }
 
 message GetEditLogManifestResponseProto {
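
Because inProgressOk is optional with an explicit default of false, the request message stays backward-compatible: old clients simply never set it, and servers fall back to the previous behavior. The same flag appears to surface in the Java-side changes in this commit, namely the new trailing boolean passed to selectInputStreams(...) in BootstrapStandby above and to getRemoteEditLogs(...) in TestFileJournalManager below.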

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1513717-1516228

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1513717-1516228

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html Wed Aug 21 17:47:10 2013
@@ -1,5 +1,3 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=dfshealth.jsp"/>
-<html>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,6 +14,8 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+<meta HTTP-EQUIV="REFRESH" content="0;url=dfshealth.jsp"/>
+<html>
 <head>
 <title>Hadoop Administration</title>
 </head>

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Wed Aug 21 17:47:10 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 import static org.junit.Assert.*;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang.StringUtils;
@@ -30,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.*;
 
-import com.google.common.base.Joiner;
-
 public class TestGlobPaths {
 
   static class RegexPathFilter implements PathFilter {
@@ -50,6 +47,7 @@ public class TestGlobPaths {
 
   static private MiniDFSCluster dfsCluster;
   static private FileSystem fs;
+  static private FileContext fc;
   static final private int NUM_OF_PATHS = 4;
   static private String USER_DIR;
   private Path[] path = new Path[NUM_OF_PATHS];
@@ -59,6 +57,7 @@ public class TestGlobPaths {
     Configuration conf = new HdfsConfiguration();
     dfsCluster = new MiniDFSCluster.Builder(conf).build();
     fs = FileSystem.get(conf);
+    fc = FileContext.getFileContext(conf);
     USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
   }
   
@@ -466,6 +465,9 @@ public class TestGlobPaths {
   
   @Test
   public void pTestEscape() throws IOException {
+    // Skip the test case on Windows because backslash will be treated as a
+    // path separator instead of an escaping character on Windows.
+    org.junit.Assume.assumeTrue(!Path.WINDOWS);
     try {
       String [] files = new String[] {USER_DIR+"/ab\\[c.d"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
@@ -620,21 +622,7 @@ public class TestGlobPaths {
       cleanupDFS();
     }
   }
-  
-  @Test
-  public void pTestRelativePath() throws IOException {
-    try {
-      String [] files = new String[] {"a", "abc", "abc.p", "bacd"};
-      Path[] matchedPath = prepareTesting("a*", files);
-      assertEquals(matchedPath.length, 3);
-      assertEquals(matchedPath[0], new Path(USER_DIR, path[0]));
-      assertEquals(matchedPath[1], new Path(USER_DIR, path[1]));
-      assertEquals(matchedPath[2], new Path(USER_DIR, path[2]));
-    } finally {
-      cleanupDFS();
-    }
-  }
-  
+
   /* Test {xx,yy} */
   @Test
   public void pTestCurlyBracket() throws IOException {
@@ -800,28 +788,24 @@ public class TestGlobPaths {
   /**
    * Run a glob test on FileSystem.
    */
-  private static void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+  private void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception {
     try {
-      FileSystem fs = FileSystem.get(conf);
+      fc.mkdir(new Path(USER_DIR), FsPermission.getDefault(), true);
       test.run(new FileSystemTestWrapper(fs), fs, null);
     } finally {
-      cluster.shutdown();
+      fc.delete(new Path(USER_DIR), true);
     }
   }
 
   /**
    * Run a glob test on FileContext.
    */
-  private static void testOnFileContext(FSTestWrapperGlobTest test) throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+  private void testOnFileContext(FSTestWrapperGlobTest test) throws Exception {
     try {
-      FileContext fc = FileContext.getFileContext(conf);
+      fs.mkdirs(new Path(USER_DIR));
       test.run(new FileContextTestWrapper(fc), null, fc);
     } finally {
-      cluster.shutdown();
+      cleanupDFS();
     }
   }
   
@@ -854,32 +838,33 @@ public class TestGlobPaths {
         throws Exception {
       // Test that globbing through a symlink to a directory yields a path
       // containing that symlink.
-      wrap.mkdir(new Path("/alpha"),
-          FsPermission.getDirDefault(), false);
-      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
-      wrap.mkdir(new Path("/alphaLink/beta"),
+      wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
+          false);
+      wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+          + "/alphaLink"), false);
+      wrap.mkdir(new Path(USER_DIR + "/alphaLink/beta"),
           FsPermission.getDirDefault(), false);
       // Test simple glob
-      FileStatus[] statuses =
-          wrap.globStatus(new Path("/alpha/*"), new AcceptAllPathFilter());
+      FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"),
+          new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alpha/beta",
-          statuses[0].getPath().toUri().getPath());
+      Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
+          .toUri().getPath());
       // Test glob through symlink
-      statuses =
-          wrap.globStatus(new Path("/alphaLink/*"), new AcceptAllPathFilter());
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"),
+          new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alphaLink/beta",
-          statuses[0].getPath().toUri().getPath());
+      Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
+          .toUri().getPath());
       // If the terminal path component in a globbed path is a symlink,
       // we don't dereference that link.
-      wrap.createSymlink(new Path("beta"), new Path("/alphaLink/betaLink"),
-          false);
-      statuses = wrap.globStatus(new Path("/alpha/betaLi*"),
+      wrap.createSymlink(new Path("beta"), new Path(USER_DIR
+          + "/alphaLink/betaLink"), false);
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"),
           new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alpha/betaLink",
-          statuses[0].getPath().toUri().getPath());
+      Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
+          .toUri().getPath());
       // todo: test symlink-to-symlink-to-dir, etc.
     }
   }
@@ -899,58 +884,64 @@ public class TestGlobPaths {
    *
    * Also test globbing dangling symlinks.  It should NOT throw any exceptions!
    */
-  private static class TestGlobWithSymlinksToSymlinks
-      implements FSTestWrapperGlobTest {
+  private static class TestGlobWithSymlinksToSymlinks implements
+      FSTestWrapperGlobTest {
     public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
         throws Exception {
       // Test that globbing through a symlink to a symlink to a directory
       // fully resolves
-      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
-      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
-      wrap.createSymlink(new Path("/alphaLink"),
-          new Path("/alphaLinkLink"), false);
-      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
+      wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
+          false);
+      wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+          + "/alphaLink"), false);
+      wrap.createSymlink(new Path(USER_DIR + "/alphaLink"), new Path(USER_DIR
+          + "/alphaLinkLink"), false);
+      wrap.mkdir(new Path(USER_DIR + "/alpha/beta"),
+          FsPermission.getDirDefault(), false);
       // Test glob through symlink to a symlink to a directory
-      FileStatus statuses[] =
-          wrap.globStatus(new Path("/alphaLinkLink"), new AcceptAllPathFilter());
+      FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR
+          + "/alphaLinkLink"), new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alphaLinkLink",
-          statuses[0].getPath().toUri().getPath());
-      statuses =
-          wrap.globStatus(new Path("/alphaLinkLink/*"), new AcceptAllPathFilter());
+      Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
+          .toUri().getPath());
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"),
+          new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alphaLinkLink/beta",
-          statuses[0].getPath().toUri().getPath());
+      Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
+          .getPath().toUri().getPath());
       // Test glob of dangling symlink (theta does not actually exist)
-      wrap.createSymlink(new Path("theta"), new Path("/alpha/kappa"), false);
-      statuses = wrap.globStatus(new Path("/alpha/kappa/kappa"),
-              new AcceptAllPathFilter());
+      wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR
+          + "/alpha/kappa"), false);
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"),
+          new AcceptAllPathFilter());
       Assert.assertNull(statuses);
       // Test glob of symlinks
-      wrap.createFile("/alpha/beta/gamma");
-      wrap.createSymlink(new Path("gamma"),
-          new Path("/alpha/beta/gammaLink"), false);
-      wrap.createSymlink(new Path("gammaLink"),
-          new Path("/alpha/beta/gammaLinkLink"), false);
-      wrap.createSymlink(new Path("gammaLinkLink"),
-          new Path("/alpha/beta/gammaLinkLinkLink"), false);
-      statuses = wrap.globStatus(new Path("/alpha/*/gammaLinkLinkLink"),
-              new AcceptAllPathFilter());
+      wrap.createFile(USER_DIR + "/alpha/beta/gamma");
+      wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR
+          + "/alpha/beta/gammaLink"), false);
+      wrap.createSymlink(new Path(USER_DIR + "gammaLink"), new Path(USER_DIR
+          + "/alpha/beta/gammaLinkLink"), false);
+      wrap.createSymlink(new Path(USER_DIR + "gammaLinkLink"), new Path(
+          USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false);
+      statuses = wrap.globStatus(new Path(USER_DIR
+          + "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alpha/beta/gammaLinkLinkLink",
+      Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
           statuses[0].getPath().toUri().getPath());
-      statuses = wrap.globStatus(new Path("/alpha/beta/*"),
-              new AcceptAllPathFilter());
-      Assert.assertEquals("/alpha/beta/gamma;/alpha/beta/gammaLink;" +
-          "/alpha/beta/gammaLinkLink;/alpha/beta/gammaLinkLinkLink",
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"),
+          new AcceptAllPathFilter());
+      Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
+          + "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;"
+          + USER_DIR + "/alpha/beta/gammaLinkLinkLink",
           TestPath.mergeStatuses(statuses));
       // Let's create two symlinks that point to each other, and glob on them.
-      wrap.createSymlink(new Path("tweedledee"),
-          new Path("/tweedledum"), false);
-      wrap.createSymlink(new Path("tweedledum"),
-          new Path("/tweedledee"), false);
-      statuses = wrap.globStatus(new Path("/tweedledee/unobtainium"),
-              new AcceptAllPathFilter());
+      wrap.createSymlink(new Path(USER_DIR + "tweedledee"), new Path(USER_DIR
+          + "/tweedledum"), false);
+      wrap.createSymlink(new Path(USER_DIR + "tweedledum"), new Path(USER_DIR
+          + "/tweedledee"), false);
+      statuses = wrap.globStatus(
+          new Path(USER_DIR + "/tweedledee/unobtainium"),
+          new AcceptAllPathFilter());
       Assert.assertNull(statuses);
     }
   }
@@ -968,34 +959,39 @@ public class TestGlobPaths {
   /**
    * Test globbing symlinks with a custom PathFilter
    */
-  private static class TestGlobSymlinksWithCustomPathFilter
-      implements FSTestWrapperGlobTest {
+  private static class TestGlobSymlinksWithCustomPathFilter implements
+      FSTestWrapperGlobTest {
     public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
         throws Exception {
       // Test that globbing through a symlink to a symlink to a directory
       // fully resolves
-      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
-      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLinkz"), false);
-      wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false);
-      wrap.mkdir(new Path("/alpha/betaz"), FsPermission.getDirDefault(), false);
-      // Test glob through symlink to a symlink to a directory, with a PathFilter
-      FileStatus statuses[] =
-          wrap.globStatus(new Path("/alpha/beta"), new AcceptPathsEndingInZ());
+      wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
+          false);
+      wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+          + "/alphaLinkz"), false);
+      wrap.mkdir(new Path(USER_DIR + "/alpha/beta"),
+          FsPermission.getDirDefault(), false);
+      wrap.mkdir(new Path(USER_DIR + "/alpha/betaz"),
+          FsPermission.getDirDefault(), false);
+      // Test glob through symlink to a symlink to a directory, with a
+      // PathFilter
+      FileStatus statuses[] = wrap.globStatus(
+          new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ());
       Assert.assertNull(statuses);
-      statuses =
-          wrap.globStatus(new Path("/alphaLinkz/betaz"), new AcceptPathsEndingInZ());
+      statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"),
+          new AcceptPathsEndingInZ());
       Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals("/alphaLinkz/betaz",
-          statuses[0].getPath().toUri().getPath());
-      statuses =
-          wrap.globStatus(new Path("/*/*"), new AcceptPathsEndingInZ());
-      Assert.assertEquals("/alpha/betaz;/alphaLinkz/betaz",
-          TestPath.mergeStatuses(statuses));
-      statuses =
-          wrap.globStatus(new Path("/*/*"), new AcceptAllPathFilter());
-      Assert.assertEquals("/alpha/beta;/alpha/betaz;" +
-          "/alphaLinkz/beta;/alphaLinkz/betaz",
-          TestPath.mergeStatuses(statuses));
+      Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
+          .toUri().getPath());
+      statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
+          new AcceptPathsEndingInZ());
+      Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
+          + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
+      statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
+          new AcceptAllPathFilter());
+      Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
+          + "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR
+          + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
     }
   }
 
@@ -1012,24 +1008,25 @@ public class TestGlobPaths {
   /**
    * Test that globStatus fills in the scheme even when it is not provided.
    */
-  private static class TestGlobFillsInScheme
-      implements FSTestWrapperGlobTest {
-    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) 
+  private static class TestGlobFillsInScheme implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
         throws Exception {
       // Verify that the default scheme is hdfs, when we don't supply one.
-      wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false);
-      wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false);
-      FileStatus statuses[] =
-          wrap.globStatus(new Path("/alphaLink"), new AcceptAllPathFilter());
+      wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
+          false);
+      wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+          + "/alphaLink"), false);
+      FileStatus statuses[] = wrap.globStatus(
+          new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
       Assert.assertEquals(1, statuses.length);
       Path path = statuses[0].getPath();
-      Assert.assertEquals("/alphaLink", path.toUri().getPath());
+      Assert.assertEquals(USER_DIR + "/alphaLink", path.toUri().getPath());
       Assert.assertEquals("hdfs", path.toUri().getScheme());
       if (fc != null) {
         // If we're using FileContext, then we can list a file:/// URI.
         // Since everyone should have the root directory, we list that.
-        statuses =
-            wrap.globStatus(new Path("file:///"), new AcceptAllPathFilter());
+        statuses = wrap.globStatus(new Path("file:///"),
+            new AcceptAllPathFilter());
         Assert.assertEquals(1, statuses.length);
         Path filePath = statuses[0].getPath();
         Assert.assertEquals("file", filePath.toUri().getScheme());
@@ -1050,4 +1047,43 @@ public class TestGlobPaths {
   public void testGlobFillsInSchemeOnFC() throws Exception {
     testOnFileContext(new TestGlobFillsInScheme());
   }
+
+  /**
+   * Test that globStatus works with relative paths.
+   **/
+  private static class TestRelativePath implements FSTestWrapperGlobTest {
+    public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc)
+      throws Exception {
+      String[] files = new String[] { "a", "abc", "abc.p", "bacd" };
+
+      Path[] path = new Path[files.length];
+      for(int i=0; i <  files.length; i++) {
+        path[i] = wrap.makeQualified(new Path(files[i]));
+        wrap.mkdir(path[i], FsPermission.getDirDefault(), true);
+      }
+
+      Path patternPath = new Path("a*");
+      Path[] globResults = FileUtil.stat2Paths(wrap.globStatus(patternPath,
+            new AcceptAllPathFilter()),
+          patternPath);
+
+      for(int i=0; i < globResults.length; i++) {
+        globResults[i] = wrap.makeQualified(globResults[i]);
+      }
+
+      assertEquals(globResults.length, 3);
+      assertEquals(USER_DIR + "/a;" + USER_DIR + "/abc;" + USER_DIR + "/abc.p",
+                    TestPath.mergeStatuses(globResults));
+    }
+  }
+
+  @Test
+  public void testRelativePathOnFS() throws Exception {
+    testOnFileSystem(new TestRelativePath());
+  }
+
+  @Test
+  public void testRelativePathOnFC() throws Exception {
+    testOnFileContext(new TestRelativePath());
+  }
 }
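
Two themes run through the TestGlobPaths changes: the wrapper-based tests now share the class-level MiniDFSCluster (testOnFileSystem/testOnFileContext no longer spin up and tear down a cluster per test, they just create and clean USER_DIR), and all glob targets moved from the root down into USER_DIR so tests cannot collide and cleanup is a single recursive delete. The old pTestRelativePath case was not dropped but reimplemented as the wrapper-based TestRelativePath, so relative-path globbing is now exercised on both FileSystem and FileContext.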

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Wed Aug 21 17:47:10 2013
@@ -59,6 +59,9 @@ public class TestHDFSFileContextMainOper
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+    // Make defaultWorkingDirectory snapshottable to enable 
+    // testGlobStatusFilterWithHiddenPathTrivialFilter
+    cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory);
   }
 
   private static void restartCluster() throws IOException, LoginException {
@@ -73,6 +76,9 @@ public class TestHDFSFileContextMainOper
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+    // Make defaultWorkingDirectory snapshottable to enable 
+    // testGlobStatusFilterWithHiddenPathTrivialFilter
+    cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory);
   }
       
   @AfterClass
@@ -93,6 +99,11 @@ public class TestHDFSFileContextMainOper
   }
 
   @Override
+  protected Path getHiddenPathForTest() {
+    return new Path(defaultWorkingDirectory, ".snapshot");
+  }
+  
+  @Override
   protected Path getDefaultWorkingDirectory() {
     return defaultWorkingDirectory;
   } 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Wed Aug 21 17:47:10 2013
@@ -440,7 +440,8 @@ public class TestDatanodeBlockScanner {
     }
   }
   
-  private static final String BASE_PATH = "/data/current/finalized";
+  private static final String BASE_PATH = (new File("/data/current/finalized"))
+      .getAbsolutePath();
   
   @Test
   public void testReplicaInfoParsing() throws Exception {
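
Wrapping the literal in File#getAbsolutePath() makes BASE_PATH platform-correct: on Windows a drive-relative path such as /data/current/finalized is resolved against the current drive, so the raw string would never match the absolute paths the test compares against. A quick illustration (output is machine-dependent):

    import java.io.File;

    public class AbsPathDemo {
      public static void main(String[] args) {
        // Prints /data/current/finalized on Linux, but something like
        // C:\data\current\finalized on Windows.
        System.out.println(new File("/data/current/finalized").getAbsolutePath());
      }
    }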

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Wed Aug 21 17:47:10 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -337,4 +338,47 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
+  
+  /** Tests appending after soft-limit expires. */
+  @Test
+  public void testAppendAfterSoftLimit() 
+      throws IOException, InterruptedException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+    //Set small soft-limit for lease
+    final long softLimit = 1L;
+    final long hardLimit = 9999999L;
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.setLeasePeriod(softLimit, hardLimit);
+    cluster.waitActive();
+
+    FileSystem fs = cluster.getFileSystem();
+    FileSystem fs2 = new DistributedFileSystem();
+    fs2.initialize(fs.getUri(), conf);
+
+    final Path testPath = new Path("/testAppendAfterSoftLimit");
+    final byte[] fileContents = AppendTestUtil.initBuffer(32);
+
+    // create a new file without closing
+    FSDataOutputStream out = fs.create(testPath);
+    out.write(fileContents);
+
+    //Wait for > soft-limit
+    Thread.sleep(250);
+
+    try {
+      FSDataOutputStream appendStream2 = fs2.append(testPath);
+      appendStream2.write(fileContents);
+      appendStream2.close();
+      assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
+    } finally {
+      fs.close();
+      fs2.close();
+      cluster.shutdown();
+    }
+  }
+
 }
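
The new testAppendAfterSoftLimit leans on HDFS lease semantics: the first client holds the lease on the still-open file, and once the soft limit (shrunk to 1 ms here) expires, another lease holder may recover the lease and append. fs2 is initialized as a separate DistributedFileSystem instance precisely so it counts as a distinct client; the 250 ms sleep just guarantees the soft limit has passed before the second append is attempted.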

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Wed Aug 21 17:47:10 2013
@@ -394,12 +394,12 @@ public class TestDirectoryScanner {
     
     @Override
     public String getBasePath() {
-      return "/base";
+      return (new File("/base")).getAbsolutePath();
     }
     
     @Override
     public String getPath(String bpid) throws IOException {
-      return "/base/current/" + bpid;
+      return (new File("/base/current/" + bpid)).getAbsolutePath();
     }
 
     @Override
@@ -416,8 +416,6 @@ public class TestDirectoryScanner {
       
   void testScanInfoObject(long blockId, File blockFile, File metaFile)
       throws Exception {
-    assertEquals("/base/current/" + BPID_1 + "/finalized",
-        TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath());
     DirectoryScanner.ScanInfo scanInfo =
         new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
     assertEquals(blockId, scanInfo.getBlockId());

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Wed Aug 21 17:47:10 2013
@@ -62,6 +62,7 @@ import org.mockito.Mockito;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -555,4 +556,16 @@ public abstract class FSImageTestUtil {
   public static long getNSQuota(FSNamesystem ns) {
     return ns.dir.rootDir.getNsQuota();
   }
+  
+  public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {
+    List<File> curDirs = Lists.newArrayList();
+    curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
+    curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
+    
+    // Ignore seen_txid file, since the newly bootstrapped standby
+    // will have a higher seen_txid than the one it bootstrapped from.
+    Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
+    FSImageTestUtil.assertParallelFilesAreIdentical(curDirs,
+        ignoredFiles);
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Aug 21 17:47:10 2013
@@ -61,6 +61,8 @@ import org.apache.hadoop.net.NetworkTopo
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
@@ -100,7 +102,7 @@ import org.apache.log4j.LogManager;
  * Then the benchmark executes the specified number of operations using 
  * the specified number of threads and outputs the resulting stats.
  */
-public class NNThroughputBenchmark {
+public class NNThroughputBenchmark implements Tool {
   private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
   private static final int BLOCK_SIZE = 16;
   private static final String GENERAL_OPTIONS_USAGE = 
@@ -115,6 +117,8 @@ public class NNThroughputBenchmark {
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
     config.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
+    // Turn off minimum block size verification
+    config.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
     // set exclude file
     config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
       "${hadoop.tmp.dir}/dfs/hosts/exclude");
@@ -129,14 +133,11 @@ public class NNThroughputBenchmark {
     config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include");
     File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include"));
     new FileOutputStream(includeFile).close();
-    // Start the NameNode
-    String[] argv = new String[] {};
-    nameNode = NameNode.createNameNode(argv, config);
-    nameNodeProto = nameNode.getRpcServer();
   }
 
   void close() {
-    nameNode.stop();
+    if(nameNode != null)
+      nameNode.stop();
   }
 
   static void setNameNodeLoggingLevel(Level logLevel) {
@@ -1290,52 +1291,69 @@ public class NNThroughputBenchmark {
     System.exit(-1);
   }
 
+  public static void runBenchmark(Configuration conf, List<String> args)
+      throws Exception {
+    NNThroughputBenchmark bench = null;
+    try {
+      bench = new NNThroughputBenchmark(conf);
+      bench.run(args.toArray(new String[]{}));
+    } finally {
+      if(bench != null)
+        bench.close();
+    }
+  }
+
   /**
    * Main method of the benchmark.
    * @param args command line parameters
    */
-  public static void runBenchmark(Configuration conf, List<String> args) throws Exception {
+  @Override // Tool
+  public int run(String[] aArgs) throws Exception {
+    List<String> args = new ArrayList<String>(Arrays.asList(aArgs));
     if(args.size() < 2 || ! args.get(0).startsWith("-op"))
       printUsage();
 
     String type = args.get(1);
     boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
 
-    NNThroughputBenchmark bench = null;
+    // Start the NameNode
+    String[] argv = new String[] {};
+    nameNode = NameNode.createNameNode(argv, config);
+    nameNodeProto = nameNode.getRpcServer();
+
     List<OperationStatsBase> ops = new ArrayList<OperationStatsBase>();
     OperationStatsBase opStat = null;
     try {
-      bench = new NNThroughputBenchmark(conf);
       if(runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) {
-        opStat = bench.new CreateFileStats(args);
+        opStat = new CreateFileStats(args);
         ops.add(opStat);
       }
       if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
-        opStat = bench.new OpenFileStats(args);
+        opStat = new OpenFileStats(args);
         ops.add(opStat);
       }
       if(runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) {
-        opStat = bench.new DeleteFileStats(args);
+        opStat = new DeleteFileStats(args);
         ops.add(opStat);
       }
       if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
-        opStat = bench.new FileStatusStats(args);
+        opStat = new FileStatusStats(args);
         ops.add(opStat);
       }
       if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
-        opStat = bench.new RenameFileStats(args);
+        opStat = new RenameFileStats(args);
         ops.add(opStat);
       }
       if(runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) {
-        opStat = bench.new BlockReportStats(args);
+        opStat = new BlockReportStats(args);
         ops.add(opStat);
       }
       if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
-        opStat = bench.new ReplicationStats(args);
+        opStat = new ReplicationStats(args);
         ops.add(opStat);
       }
       if(runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) {
-        opStat = bench.new CleanAllStats(args);
+        opStat = new CleanAllStats(args);
         ops.add(opStat);
       }
       if(ops.size() == 0)
@@ -1354,14 +1372,28 @@ public class NNThroughputBenchmark {
     } catch(Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw e;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    NNThroughputBenchmark bench = null;
+    try {
+      bench = new NNThroughputBenchmark(new HdfsConfiguration());
+      ToolRunner.run(bench, args);
     } finally {
       if(bench != null)
         bench.close();
     }
   }
 
-  public static void main(String[] args) throws Exception {
-    runBenchmark(new HdfsConfiguration(), 
-                  new ArrayList<String>(Arrays.asList(args)));
+  @Override // Configurable
+  public void setConf(Configuration conf) {
+    config = conf;
+  }
+
+  @Override // Configurable
+  public Configuration getConf() {
+    return config;
   }
 }
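
Implementing Tool and routing main() through ToolRunner gives the benchmark the standard generic options handling (e.g. -conf and -D definitions) for free; note also that NameNode creation moved from the constructor into run(), which is why close() now tolerates a null nameNode. The static runBenchmark() entry point is kept for programmatic callers. A hedged sketch of such a call; the operation name "create" is assumed from CreateFileStats.OP_CREATE_NAME, whose value this diff does not show:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

    public class BenchDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // "-op <type>" must lead the argument list (see run() above).
        NNThroughputBenchmark.runBenchmark(conf,
            Arrays.asList("-op", "create"));
      }
    }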

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java Wed Aug 21 17:47:10 2013
@@ -42,7 +42,8 @@ public class TestEditLogFileInputStream 
   @Test
   public void testReadURL() throws Exception {
     // Start a simple web server which hosts the log data.
-    HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
+    HttpServer server = new HttpServer.Builder().setName("test")
+        .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build();
     server.start();
     try {
       server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Wed Aug 21 17:47:10 2013
@@ -479,6 +479,6 @@ public class TestFileJournalManager {
 
   private static String getLogsAsString(
       FileJournalManager fjm, long firstTxId) throws IOException {
-    return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true));
+    return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true, false));
   }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Aug 21 17:47:10 2013
@@ -153,8 +153,8 @@ public class TestFsck {
       String outStr = runFsck(conf, 0, true, "/");
       verifyAuditLogs();
       assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       if (fs != null) {try{fs.close();} catch(Exception e){}}
       cluster.shutdown();
       
@@ -194,18 +194,30 @@ public class TestFsck {
     // Turn off the logs
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
     logger.setLevel(Level.OFF);
-    
-    // Audit log should contain one getfileinfo and one fsck
-    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
-    String line = reader.readLine();
-    assertNotNull(line);
-    assertTrue("Expected getfileinfo event not found in audit log",
-        getfileinfoPattern.matcher(line).matches());
-    line = reader.readLine();
-    assertNotNull(line);
-    assertTrue("Expected fsck event not found in audit log",
-        fsckPattern.matcher(line).matches());
-    assertNull("Unexpected event in audit log", reader.readLine());
+
+    BufferedReader reader = null;
+    try {
+      // Audit log should contain one getfileinfo and one fsck
+      reader = new BufferedReader(new FileReader(auditLogFile));
+      String line = reader.readLine();
+      assertNotNull(line);
+      assertTrue("Expected getfileinfo event not found in audit log",
+          getfileinfoPattern.matcher(line).matches());
+      line = reader.readLine();
+      assertNotNull(line);
+      assertTrue("Expected fsck event not found in audit log", fsckPattern
+          .matcher(line).matches());
+      assertNull("Unexpected event in audit log", reader.readLine());
+    } finally {
+      // Close the reader and remove the appender to release the audit log file
+      // handle after verifying the content of the file.
+      if (reader != null) {
+        reader.close();
+      }
+      if (logger != null) {
+        logger.removeAllAppenders();
+      }
+    }
   }
   
   @Test
@@ -963,9 +975,9 @@ public class TestFsck {
       String outStr = runFsck(conf, 0, true, "/");
       verifyAuditLogs();
       assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("Total symlinks:\t\t1\n"));
       System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      assertTrue(outStr.contains("Total symlinks:\t\t1"));
       util.cleanup(fs, fileName);
     } finally {
       if (fs != null) {try{fs.close();} catch(Exception e){}}

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Wed Aug 21 17:47:10 2013
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.PathIsNotDir
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -901,31 +902,65 @@ public class TestINodeFile {
   @Test
   public void testInodeReplacement() throws Exception {
     final Configuration conf = new Configuration();
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(1).build();
-    cluster.waitActive();
-    final DistributedFileSystem hdfs = cluster.getFileSystem();
-    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
-    
-    final Path dir = new Path("/dir");
-    hdfs.mkdirs(dir);
-    INode dirNode = fsdir.getINode(dir.toString());
-    INode dirNodeFromNode = fsdir.getInode(dirNode.getId());
-    assertSame(dirNode, dirNodeFromNode);
-    
-    // set quota to dir, which leads to node replacement
-    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
-    dirNode = fsdir.getINode(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectoryWithQuota);
-    // the inode in inodeMap should also be replaced
-    dirNodeFromNode = fsdir.getInode(dirNode.getId());
-    assertSame(dirNode, dirNodeFromNode);
-    
-    hdfs.setQuota(dir, -1, -1);
-    dirNode = fsdir.getINode(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectory);
-    // the inode in inodeMap should also be replaced
-    dirNodeFromNode = fsdir.getInode(dirNode.getId());
-    assertSame(dirNode, dirNodeFromNode);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+
+      final Path dir = new Path("/dir");
+      hdfs.mkdirs(dir);
+      INode dirNode = fsdir.getINode(dir.toString());
+      INode dirNodeFromNode = fsdir.getInode(dirNode.getId());
+      assertSame(dirNode, dirNodeFromNode);
+
+      // set quota to dir, which leads to node replacement
+      hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+      dirNode = fsdir.getINode(dir.toString());
+      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      // the inode in inodeMap should also be replaced
+      dirNodeFromNode = fsdir.getInode(dirNode.getId());
+      assertSame(dirNode, dirNodeFromNode);
+
+      hdfs.setQuota(dir, -1, -1);
+      dirNode = fsdir.getINode(dir.toString());
+      assertTrue(dirNode instanceof INodeDirectory);
+      // the inode in inodeMap should also be replaced
+      dirNodeFromNode = fsdir.getInode(dirNode.getId());
+      assertSame(dirNode, dirNodeFromNode);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  @Test
+  public void testDotdotInodePath() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+
+      final Path dir = new Path("/dir");
+      hdfs.mkdirs(dir);
+      long dirId = fsdir.getINode(dir.toString()).getId();
+      long parentId = fsdir.getINode("/").getId();
+      String testPath = "/.reserved/.inodes/" + dirId + "/..";
+
+      DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
+      HdfsFileStatus status = client.getFileInfo(testPath);
+      assertTrue(parentId == status.getFileId());
+      
+      // Test root's parent is still root
+      testPath = "/.reserved/.inodes/" + parentId + "/..";
+      status = client.getFileInfo(testPath);
+      assertTrue(parentId == status.getFileId());
+      
+    } finally {
+      if (cluster != null) cluster.shutdown();
+    }
   }
 }

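The new testDotdotInodePath exercises the reserved inode-path feature: any
inode can be addressed as /.reserved/.inodes/<inode-id>, and a trailing ".."
resolves to the parent inode (with "/" as its own parent). A minimal
client-side sketch of the same lookup, assuming a live cluster and the
DFSClient API used in the test:

    // Resolve a directory by inode id instead of by name.
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    HdfsFileStatus byName = client.getFileInfo("/dir");
    long id = byName.getFileId();
    // "/.reserved/.inodes/<id>" reaches the same inode as "/dir".
    HdfsFileStatus byId = client.getFileInfo("/.reserved/.inodes/" + id);
    assert byName.getFileId() == byId.getFileId();
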
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed Aug 21 17:47:10 2013
@@ -120,6 +120,11 @@ public class TestNameNodeMXBean {
       String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
           "NameJournalStatus"));
       assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
+      // get attribute JournalTransactionInfo
+      String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
+          "JournalTransactionInfo");
+      assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
+          journalTxnInfo);
       // get attribute "NNStarted"
       String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
       assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);

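The added assertion reads the new JournalTransactionInfo attribute through
JMX. A self-contained sketch of the same fetch, assuming the standard
NameNodeInfo MXBean object name used elsewhere in this test:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    static String readJournalTxnInfo() throws Exception {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName name = new ObjectName(
          "Hadoop:service=NameNode,name=NameNodeInfo");
      return (String) mbs.getAttribute(name, "JournalTransactionInfo");
    }
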
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Aug 21 17:47:10 2013
@@ -96,7 +96,7 @@ public class TestNamenodeRetryCache {
    * @throws AccessControlException */
   @After
   public void cleanup() throws IOException {
-    namesystem.delete("/", true);
+    cluster.shutdown();
   }
   
   public static void incrementCallId() {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Wed Aug 21 17:47:10 2013
@@ -24,8 +24,6 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.util.List;
-import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,8 +43,6 @@ import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
 
 public class TestBootstrapStandby {
   private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class);
@@ -107,7 +103,7 @@ public class TestBootstrapStandby {
     // Should have copied over the namespace from the active
     FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
         ImmutableList.of(0));
-    assertNNFilesMatch();
+    FSImageTestUtil.assertNNFilesMatch(cluster);
 
     // We should now be able to start the standby successfully.
     cluster.restartNameNode(1);
@@ -138,7 +134,7 @@ public class TestBootstrapStandby {
     // Should have copied over the namespace from the active
     FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
         ImmutableList.of((int)expectedCheckpointTxId));
-    assertNNFilesMatch();
+    FSImageTestUtil.assertNNFilesMatch(cluster);
 
     // We should now be able to start the standby successfully.
     cluster.restartNameNode(1);
@@ -208,18 +204,6 @@ public class TestBootstrapStandby {
         cluster.getConfiguration(1));
     assertEquals(0, rc);
   }
-  
-  private void assertNNFilesMatch() throws Exception {
-    List<File> curDirs = Lists.newArrayList();
-    curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
-    curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
-    
-    // Ignore seen_txid file, since the newly bootstrapped standby
-    // will have a higher seen_txid than the one it bootstrapped from.
-    Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
-    FSImageTestUtil.assertParallelFilesAreIdentical(curDirs,
-        ignoredFiles);
-  }
 
   private void removeStandbyNameDirs() {
     for (URI u : cluster.getNameDirs(1)) {

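The private assertNNFilesMatch helper deleted above is now called as
FSImageTestUtil.assertNNFilesMatch(cluster). Presumably the relocated
utility keeps the same logic with the cluster passed in; a hypothetical
sketch based on the removed body:

    public static void assertNNFilesMatch(MiniDFSCluster cluster)
        throws Exception {
      List<File> curDirs = Lists.newArrayList();
      curDirs.addAll(getNameNodeCurrentDirs(cluster, 0));
      curDirs.addAll(getNameNodeCurrentDirs(cluster, 1));
      // Ignore the seen_txid file: the freshly bootstrapped standby records
      // a higher seen_txid than the node it bootstrapped from.
      Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
      assertParallelFilesAreIdentical(curDirs, ignoredFiles);
    }
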
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Wed Aug 21 17:47:10 2013
@@ -239,6 +239,34 @@ public class TestStandbyCheckpoints {
     
     assertTrue(canceledOne);
   }
+
+  /**
+   * Test that an ongoing checkpoint is cancelled when failover happens
+   * mid-checkpoint, during image upload from the standby to the active NN.
+   */
+  @Test(timeout=60000)
+  public void testCheckpointCancellationDuringUpload() throws Exception {
+    // don't compress, we want a big image
+    cluster.getConfiguration(0).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    cluster.getConfiguration(1).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    // Throttle SBN upload to make it hang during upload to ANN
+    cluster.getConfiguration(1).setLong(
+        DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
+    cluster.restartNameNode(0);
+    cluster.restartNameNode(1);
+    nn0 = cluster.getNameNode(0);
+    nn1 = cluster.getNameNode(1);
+
+    cluster.transitionToActive(0);
+
+    doEdits(0, 100);
+    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(104));
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+  }
   
   /**
    * Make sure that clients will receive StandbyExceptions even when a

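The new testCheckpointCancellationDuringUpload relies on two knobs: an
uncompressed image keeps the checkpoint large, and a tiny transfer rate
makes the standby's image upload slow enough for the failover to land
mid-upload. A sketch of the throttle setup (DFS_IMAGE_TRANSFER_RATE_KEY is
believed to map to "dfs.image.transfer.bandwidthPerSec", with a value of 0
understood to disable throttling):

    Configuration conf = new Configuration();
    // Keep the fsimage uncompressed so there is plenty to upload.
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
    // Throttle image transfer to roughly 100 bytes/sec.
    conf.setLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
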
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Wed Aug 21 17:47:10 2013
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -342,6 +344,37 @@ public class TestSnapshot {
   }
   
   /**
+   * Test creating a snapshot with an illegal name
+   */
+  @Test
+  public void testCreateSnapshotWithIllegalName() throws Exception {
+    final Path dir = new Path("/dir");
+    hdfs.mkdirs(dir);
+    
+    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
+    try {
+      hdfs.createSnapshot(dir, name1);
+      fail("Exception expected when an illegal name is given");
+    } catch (RemoteException e) {
+      String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR
+          + "\" is a reserved name.";
+      GenericTestUtils.assertExceptionContains(errorMsg, e);
+    }
+    
+    String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
+    final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
+        Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
+    for (String badName : badNames) {
+      try {
+        hdfs.createSnapshot(dir, badName);
+        fail("Exception expected when an illegal name is given");
+      } catch (RemoteException e) {
+        GenericTestUtils.assertExceptionContains(errorMsg, e);
+      }
+    }
+  }
+  
+  /**
    * Creating snapshots for a directory that is not snapshottable must fail.
    */
   @Test (timeout=60000)

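testCreateSnapshotWithIllegalName pins down two naming rules: ".snapshot"
(HdfsConstants.DOT_SNAPSHOT_DIR) is reserved, and a snapshot name may not
contain the path separator. For contrast, a sketch of the legal flow,
assuming the DistributedFileSystem#allowSnapshot API:

    // A directory must be made snapshottable before snapshots are taken
    // (requires superuser privilege).
    hdfs.allowSnapshot(dir);
    // A well-formed name succeeds; the snapshot appears under
    // /dir/.snapshot/s0.
    Path snapshotRoot = hdfs.createSnapshot(dir, "s0");
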
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Wed Aug 21 17:47:10 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.util.List;
 
@@ -29,11 +30,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -190,4 +194,36 @@ public class TestSnapshotRename {
     exception.expectMessage(error);
     hdfs.renameSnapshot(sub1, "s1", "s2");
   }
+  
+  /**
+   * Test renaming a snapshot with an illegal name
+   */
+  @Test
+  public void testRenameWithIllegalName() throws Exception {
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+    // Create snapshots for sub1
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
+    
+    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
+    try {
+      hdfs.renameSnapshot(sub1, "s1", name1);
+      fail("Exception expected when an illegal name is given for rename");
+    } catch (RemoteException e) {
+      String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR
+          + "\" is a reserved name.";
+      GenericTestUtils.assertExceptionContains(errorMsg, e);
+    }
+    
+    String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
+    final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
+        Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
+    for (String badName : badNames) {
+      try {
+        hdfs.renameSnapshot(sub1, "s1", badName);
+        fail("Exception expected when an illegal name is given");
+      } catch (RemoteException e) {
+        GenericTestUtils.assertExceptionContains(errorMsg, e);
+      }
+    }
+  }
 }

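testRenameWithIllegalName applies the same two rules to renameSnapshot that
testCreateSnapshotWithIllegalName applies to createSnapshot. A hypothetical
sketch of the shared server-side validation both tests exercise (the actual
NameNode code may be structured differently):

    static void verifySnapshotName(String name) throws IOException {
      if (HdfsConstants.DOT_SNAPSHOT_DIR.equals(name)) {
        throw new IOException("\"" + HdfsConstants.DOT_SNAPSHOT_DIR
            + "\" is a reserved name.");
      }
      if (name.contains(Path.SEPARATOR)) {
        throw new IOException("Snapshot name cannot contain \""
            + Path.SEPARATOR + "\"");
      }
    }
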
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java?rev=1516230&r1=1516229&r2=1516230&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java Wed Aug 21 17:47:10 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.web;
 import java.io.BufferedReader;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
@@ -45,8 +46,11 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.NamenodeRpcAddressParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
@@ -288,6 +292,104 @@ public class TestWebHdfsFileSystemContra
     }
   }
 
+  /**
+   * Test GET with a length parameter greater than the actual file length.
+   */
+  public void testLengthParamLongerThanFile() throws IOException {
+    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    Path dir = new Path("/test");
+    assertTrue(webhdfs.mkdirs(dir));
+
+    // Create a file with some content.
+    Path testFile = new Path("/test/testLengthParamLongerThanFile");
+    String content = "testLengthParamLongerThanFile";
+    FSDataOutputStream testFileOut = webhdfs.create(testFile);
+    try {
+      testFileOut.write(content.getBytes("US-ASCII"));
+    } finally {
+      IOUtils.closeStream(testFileOut);
+    }
+
+    // Open the file, but request length longer than actual file length by 1.
+    HttpOpParam.Op op = GetOpParam.Op.OPEN;
+    URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf(
+      content.length() + 1)));
+    HttpURLConnection conn = null;
+    InputStream is = null;
+    try {
+      conn = (HttpURLConnection)url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(op.getDoOutput());
+      conn.setInstanceFollowRedirects(true);
+
+      // Expect OK response and Content-Length header equal to actual length.
+      assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+      assertEquals(String.valueOf(content.length()), conn.getHeaderField(
+        "Content-Length"));
+
+      // Check content matches.
+      byte[] respBody = new byte[content.length()];
+      is = conn.getInputStream();
+      IOUtils.readFully(is, respBody, 0, content.length());
+      assertEquals(content, new String(respBody, "US-ASCII"));
+    } finally {
+      IOUtils.closeStream(is);
+      if (conn != null) {
+        conn.disconnect();
+      }
+    }
+  }
+
+  /**
+   * Test GET with offset and length parameters that together request more
+   * than the actual file length.
+   */
+  public void testOffsetPlusLengthParamsLongerThanFile() throws IOException {
+    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    Path dir = new Path("/test");
+    assertTrue(webhdfs.mkdirs(dir));
+
+    // Create a file with some content.
+    Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile");
+    String content = "testOffsetPlusLengthParamsLongerThanFile";
+    FSDataOutputStream testFileOut = webhdfs.create(testFile);
+    try {
+      testFileOut.write(content.getBytes("US-ASCII"));
+    } finally {
+      IOUtils.closeStream(testFileOut);
+    }
+
+    // Open the file, but request offset starting at 1 and length equal to file
+    // length.  Considering the offset, this is longer than the actual content.
+    HttpOpParam.Op op = GetOpParam.Op.OPEN;
+    URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf(
+      content.length())), new OffsetParam(1L));
+    HttpURLConnection conn = null;
+    InputStream is = null;
+    try {
+      conn = (HttpURLConnection)url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(op.getDoOutput());
+      conn.setInstanceFollowRedirects(true);
+
+      // Expect OK response and Content-Length header equal to actual length.
+      assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+      assertEquals(String.valueOf(content.length() - 1), conn.getHeaderField(
+        "Content-Length"));
+
+      // Check content matches.
+      byte[] respBody = new byte[content.length() - 1];
+      is = conn.getInputStream();
+      IOUtils.readFully(is, respBody, 0, content.length() - 1);
+      assertEquals(content.substring(1), new String(respBody, "US-ASCII"));
+    } finally {
+      IOUtils.closeStream(is);
+      if (conn != null) {
+        conn.disconnect();
+      }
+    }
+  }
+
   public void testResponseCode() throws IOException {
     final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
     final Path root = new Path("/");

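The two new WebHDFS tests establish that an OPEN request whose offset and
length overshoot the end of file is clamped rather than rejected: the
response is 200 OK, and Content-Length reports only the bytes actually
available (fileLength - offset). In REST terms (standard WebHDFS parameter
names), the second test issues roughly:

    // GET http://<namenode>:<port>/webhdfs/v1/<PATH>?op=OPEN&offset=1&length=<fileLen>
    URL url = webhdfs.toUrl(GetOpParam.Op.OPEN, testFile,
        new OffsetParam(1L), new LengthParam((long) content.length()));
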

