hadoop-hdfs-commits mailing list archives

From su...@apache.org
Subject svn commit: r1609878 [7/9] - in /hadoop/common/branches/YARN-1051/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs...
Date Sat, 12 Jul 2014 02:24:55 GMT
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml Sat Jul 12 02:24:40 2014
@@ -97,7 +97,9 @@
     <li>Listing the files in snapshot <code>s0</code>:
       <source>hdfs dfs -ls /foo/.snapshot/s0</source></li>
     <li>Copying a file from snapshot <code>s0</code>:
-      <source>hdfs dfs -cp /foo/.snapshot/s0/bar /tmp</source></li>
+      <source>hdfs dfs -cp -ptopax /foo/.snapshot/s0/bar /tmp</source>
+      <p>Note that this example uses the <code>-ptopax</code> preserve option
+         to retain timestamps, ownership, permission, ACLs and XAttrs.</p></li>
   </ul>
   </subsection>
   </section>
@@ -255,7 +257,35 @@
       <tr><td>fromSnapshot</td><td>The name of the starting snapshot.</td></tr>
       <tr><td>toSnapshot</td><td>The name of the ending snapshot.</td></tr>
     </table></li>
-  </ul>
+    <li>Results:
+      <table>
+        <tr><td>+</td><td>The file/directory has been created.</td></tr>
+        <tr><td>-</td><td>The file/directory has been deleted.</td></tr>
+        <tr><td>M</td><td>The file/directory has been modified.</td></tr>
+        <tr><td>R</td><td>The file/directory has been renamed.</td></tr>
+      </table>
+    </li>
+  </ul>
+  <p>
+    A <em>RENAME</em> entry indicates that a file/directory has been renamed but
+    is still under the same snapshottable directory. A file/directory is
+    reported as deleted if it was renamed outside of the snapshottable directory.
+    A file/directory renamed from outside of the snapshottable directory is
+    reported as newly created.
+  </p>
+  <p>
+    The snapshot difference report does not guarantee to list changes in the
+    same order in which the operations occurred.
+    For example, if we rename the directory <em>"/foo"</em> to <em>"/foo2"</em>, and
+    then append new data to the file <em>"/foo2/bar"</em>, the difference report will
+    be:
+    <source>
+    R. /foo -> /foo2
+    M. /foo/bar
+    </source>
+    I.e., the changes on files/directories under a renamed directory are
+    reported using the original path before the rename (<em>"/foo/bar"</em> in
+    the above example).
+  </p>
   <p>
     See also the corresponding Java API
     <code>SnapshotDiffReport getSnapshotDiffReport(Path path, String fromSnapshot, String toSnapshot)</code>

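The report format above maps directly onto this Java API. A minimal sketch of calling it, assuming an existing snapshottable directory /foo with snapshots s0 and s1 (the class name and paths are illustrative, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class SnapshotDiffExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at the target HDFS cluster.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // Entries for files under a renamed directory use the original
        // (pre-rename) paths, as described above.
        SnapshotDiffReport report =
            dfs.getSnapshotDiffReport(new Path("/foo"), "s0", "s1");
        System.out.println(report);
      }
    }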
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java Sat Jul 12 02:24:40 2014
@@ -51,6 +51,8 @@ public class TestBlockMissingException {
     long blockSize = 1024L;
     int numBlocks = 4;
     conf = new HdfsConfiguration();
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     try {
       dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
       dfs.waitActive();

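The same one-line change recurs in several of the test files below. DFS_CLIENT_RETRY_WINDOW_BASE ("dfs.client.retry.window.base") sets the base, in milliseconds, of the randomized window the DFSClient waits before retrying after it fails to fetch a block, so a small base keeps tests that exercise failure paths from sleeping through most of their timeout. A minimal sketch of the knob (the 3000 ms default is an assumption worth verifying against your release):

    // Shrink the DFSClient retry backoff window so failure-path tests run
    // quickly; the value is in milliseconds (default believed to be 3000).
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);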
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java Sat Jul 12 02:24:40 2014
@@ -64,6 +64,8 @@ public class TestBlockReaderLocalLegacy 
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     return conf;
   }
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Sat Jul 12 02:24:40 2014
@@ -73,7 +73,8 @@ public class TestClientReportBadBlock {
   public void startUpCluster() throws IOException {
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); 
-    
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
         .build();
     cluster.waitActive();

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Sat Jul 12 02:24:40 2014
@@ -88,6 +88,8 @@ public class TestCrcCorruption {
   @Test(timeout=50000)
   public void testCorruptionDuringWrt() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     MiniDFSCluster cluster = null;
 
     try {
@@ -152,7 +154,8 @@ public class TestCrcCorruption {
     int numDataNodes = 2;
     short replFactor = 2;
     Random random = new Random();
-
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
@@ -334,6 +337,8 @@ public class TestCrcCorruption {
     short replFactor = (short)numDataNodes;
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 
     try {

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Sat Jul 12 02:24:40 2014
@@ -56,6 +56,7 @@ import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
@@ -1478,7 +1479,8 @@ public class TestDFSShell {
     Path root = new Path("/test/get");
     final Path remotef = new Path(root, fname);
     final Configuration conf = new HdfsConfiguration();
-
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     TestGetRunner runner = new TestGetRunner() {
     	private int count = 0;
     	private final FsShell shell = new FsShell(conf);
@@ -1778,6 +1780,166 @@ public class TestDFSShell {
     }
   }
 
+  // Verify that the cp -p preserve options (-p, -ptop, -ptopx, -ptopa,
+  // -ptoa) preserve directory attributes.
+  @Test (timeout = 120000)
+  public void testCopyCommandsToDirectoryWithPreserveOption()
+      throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path srcDir = new Path(hdfsTestDir, "srcDir");
+      fs.mkdirs(srcDir);
+
+      fs.setAcl(srcDir, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(srcDir,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      // Create a file in srcDir so we can check that the modification time
+      // of srcDir is preserved after the copy. If cp -p preserved the
+      // modification time first and then copied the child (srcFile), the
+      // directory's modification time would not be preserved.
+      Path srcFile = new Path(srcDir, "srcFile");
+      fs.create(srcFile).close();
+
+      FileStatus status = fs.getFileStatus(srcDir);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      fs.setXAttr(srcDir, "user.a1", new byte[]{0x31, 0x32, 0x33});
+      fs.setXAttr(srcDir, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+
+      shell = new FsShell(conf);
+
+      // -p
+      Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
+      String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
+          targetDir1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -p is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(targetDir1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      Map<String, byte[]> xattrs = fs.getXAttrs(targetDir1);
+      assertTrue(xattrs.isEmpty());
+      List<AclEntry> acls = fs.getAclStatus(targetDir1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptop
+      Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
+      argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
+          targetDir2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptop is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(targetDir2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(targetDir2);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(targetDir2).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopx
+      Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
+      argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
+          targetDir3.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(targetDir3);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(targetDir3);
+      assertEquals(2, xattrs.size());
+      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      acls = fs.getAclStatus(targetDir3).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa
+      Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
+      argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
+          targetDir4.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(targetDir4);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(targetDir4);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(targetDir4).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
+
+      // -ptoa (verify -pa option will preserve permissions also)
+      Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
+      argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
+          targetDir5.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(targetDir5);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(targetDir5);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(targetDir5).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
+    } finally {
+      if (shell != null) {
+        shell.close();
+      }
+      if (fs != null) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   // Verify cp -pa option will preserve both ACL and sticky bit.
   @Test (timeout = 120000)
   public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
@@ -2295,38 +2457,39 @@ public class TestDFSShell {
   }
 
   /**
-   * HDFS-6374 setXAttr should require the user to be the owner of the file
-   * or directory.
-   *
-   * Test to make sure that only the owner of a file or directory can set
-   * or remove the xattrs.
-   *
-   * As user1:
-   * Create a directory (/foo) as user1, chown it to user1 (and user1's group),
-   * grant rwx to "other".
-   *
-   * As user2:
-   * Set an xattr (should fail).
-   *
-   * As user1:
-   * Set an xattr (should pass).
-   *
-   * As user2:
-   * Read the xattr (should pass).
-   * Remove the xattr (should fail).
-   *
-   * As user1:
-   * Read the xattr (should pass).
-   * Remove the xattr (should pass).
+   * 
+   * Test to make sure that user namespace xattrs can be set only if the user
+   * has path access, and that for sticky directories only the owner or a
+   * privileged user can write. Trusted namespace xattrs can be set only by
+   * privileged users.
+   * 
+   * As user1: Create a directory (/foo) as user1, chown it to user1 (and
+   * user1's group), grant rwx to "other".
+   * 
+   * As user2: Set an xattr (should pass with path access).
+   * 
+   * As user1: Set an xattr (should pass).
+   * 
+   * As user2: Read the xattr (should pass). Remove the xattr (should pass
+   * with path access).
+   * 
+   * As user1: Read the xattr (should pass). Remove the xattr (should pass).
+   * 
+   * As user1: Change permissions to grant path access only to the owner.
+   * 
+   * As user2: Set an xattr (should fail with no path access). Remove an
+   * xattr (should fail with no path access).
+   * 
+   * As superuser: Set an xattr in the trusted namespace (should pass).
    */
   @Test (timeout = 30000)
   public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
     final String USER1 = "user1";
-    final String GROUP1 = "mygroup1";
+    final String GROUP1 = "supergroup";
     final UserGroupInformation user1 = UserGroupInformation.
         createUserForTesting(USER1, new String[] {GROUP1});
     final UserGroupInformation user2 = UserGroupInformation.
         createUserForTesting("user2", new String[] {"mygroup2"});
+    final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
@@ -2342,7 +2505,7 @@ public class TestDFSShell {
       final ByteArrayOutputStream out = new ByteArrayOutputStream();
       System.setErr(new PrintStream(out));
 
-      // mkdir foo as user1
+      // Test 1. Let user1 be the owner of /foo
       user1.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
@@ -2353,7 +2516,8 @@ public class TestDFSShell {
           return null;
         }
       });
-
+
+      // Test 2. Give access to others
       user1.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
@@ -2366,23 +2530,21 @@ public class TestDFSShell {
         }
       });
 
-      // No permission to write xattr for non-owning user (user2).
+      // Test 3. Writing an xattr should be allowed if the user (user2)
+      // has path access.
       user2.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
           final int ret = ToolRunner.run(fshell, new String[]{
               "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
-          assertEquals("Returned should be 1", 1, ret);
-          final String str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+          assertEquals("Returned should be 0", 0, ret);
           out.reset();
           return null;
         }
       });
 
-      // But there should be permission to write xattr for
-      // the owning user.
+      // Test 4. There should be permission to write an xattr for
+      // the owning user with write permissions.
       user1.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
@@ -2394,19 +2556,55 @@ public class TestDFSShell {
         }
       });
 
-      // There should be permission to read,but not to remove for
-      // non-owning user (user2).
+      // Test 5. The non-owning user (user2) should be able to read the xattr
+      // given path access, and should also be able to remove it.
       user2.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
           // Read
-          int ret = ToolRunner.run(fshell, new String[]{
-              "-getfattr", "-n", "user.a1", "/foo"});
+          int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
+              "user.a1", "/foo" });
           assertEquals("Returned should be 0", 0, ret);
           out.reset();
           // Remove
-          ret = ToolRunner.run(fshell, new String[]{
-              "-setfattr", "-x", "user.a1", "/foo"});
+          ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+              "user.a1", "/foo" });
+          assertEquals("Returned should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+
+      // Test 6. There should be permission to read/remove for
+      // the owning user with path access.
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          return null;
+        }
+      });
+      
+      // Test 7. Change permissions so that only the owner (user1) has path access
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // Restrict access to the owner only
+          final int ret = ToolRunner.run(fshell, new String[]{
+              "-chmod", "700", "/foo"});
+          assertEquals("Return should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+      
+      // Test 8. The non-owning user (user2) with no path access should not
+      // be permitted to set an xattr.
+      user2.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // set
+          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+              "user.a2", "/foo" });
           assertEquals("Returned should be 1", 1, ret);
           final String str = out.toString();
           assertTrue("Permission denied printed",
@@ -2415,20 +2613,31 @@ public class TestDFSShell {
           return null;
         }
       });
-
-      // But there should be permission to read/remove for
-      // the owning user.
-      user1.doAs(new PrivilegedExceptionAction<Object>() {
+      
+      // Test 9. The non-owning user (user2) with no path access should not
+      // be permitted to remove an xattr.
+      user2.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
-          // Read
-          int ret = ToolRunner.run(fshell, new String[]{
-              "-getfattr", "-n", "user.a1", "/foo"});
-          assertEquals("Returned should be 0", 0, ret);
+          // remove
+          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
+              "user.a2", "/foo" });
+          assertEquals("Returned should be 1", 1, ret);
+          final String str = out.toString();
+          assertTrue("Permission denied printed",
+              str.indexOf("Permission denied") != -1);
           out.reset();
-          // Remove
-          ret = ToolRunner.run(fshell, new String[]{
-              "-setfattr", "-x", "user.a1", "/foo"});
+          return null;
+        }
+      });
+      
+      // Test 10. The superuser should be allowed to set an xattr in the
+      // trusted namespace
+      SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // set
+          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
+              "trusted.a3", "/foo" });
           assertEquals("Returned should be 0", 0, ret);
           out.reset();
           return null;

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java Sat Jul 12 02:24:40 2014
@@ -202,6 +202,8 @@ public class TestEncryptedTransfer {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      // Set short retry timeouts so this test runs faster
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       
       FileSystem fs = getFileSystem(conf);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Sat Jul 12 02:24:40 2014
@@ -153,6 +153,15 @@ public class TestLeaseRecovery2 {
     verifyFile(dfs, filepath1, actual, size);
   }
 
+  @Test
+  public void testLeaseRecoverByAnotherUser() throws Exception {
+    byte [] actual = new byte[FILE_SIZE];
+    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    Path filepath = createFile("/immediateRecoverLease-x", 0, true);
+    recoverLeaseUsingCreate2(filepath);
+    verifyFile(dfs, filepath, actual, 0);
+  }
+
   private Path createFile(final String filestr, final int size,
       final boolean triggerLeaseRenewerInterrupt)
   throws IOException, InterruptedException {
@@ -196,7 +205,7 @@ public class TestLeaseRecovery2 {
   }
 
   private void recoverLeaseUsingCreate(Path filepath)
-  throws IOException, InterruptedException {
+      throws IOException, InterruptedException {
     FileSystem dfs2 = getFSAsAnotherUser(conf);
     for(int i = 0; i < 10; i++) {
       AppendTestUtil.LOG.info("i=" + i);
@@ -216,6 +225,20 @@ public class TestLeaseRecovery2 {
     fail("recoverLeaseUsingCreate failed");
   }
 
+  private void recoverLeaseUsingCreate2(Path filepath)
+          throws Exception {
+    FileSystem dfs2 = getFSAsAnotherUser(conf);
+    int size = AppendTestUtil.nextInt(FILE_SIZE);
+    DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
+    //create file using dfsx
+    Path filepath2 = new Path("/immediateRecoverLease-x2");
+    FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE,
+        REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
+    try {
+      Thread.sleep(10000);
+    } catch (InterruptedException e) {
+    }
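+    // The original writer's short lease should have expired by now, so this
+    // append by a second user forces the NameNode to recover that lease.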
+    dfsx.append(filepath);
+  }
+
   private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
       int size) throws IOException {
     AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Sat Jul 12 02:24:40 2014
@@ -58,6 +58,7 @@ public class TestMissingBlocksAlert {
       Configuration conf = new HdfsConfiguration();
       //minimize test delay
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       int fileLen = 10*1024;
       conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Sat Jul 12 02:24:40 2014
@@ -31,12 +31,16 @@ import java.util.concurrent.Future;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -49,7 +53,14 @@ import org.mockito.stubbing.Answer;
 public class TestPread {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 4096;
-  boolean simulatedStorage = false;
+  boolean simulatedStorage;
+  boolean isHedgedRead;
+
+  @Before
+  public void setup() {
+    simulatedStorage = false;
+    isHedgedRead = false;
+  }
 
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
     int replication = 3;// We need > 1 blocks to test out the hedged reads.
@@ -73,7 +84,7 @@ public class TestPread {
     
     // now create the real file
     DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
-        blockSize, (short) 1, seed);
+        blockSize, (short) replication, seed);
   }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
@@ -104,8 +115,13 @@ public class TestPread {
     }
 
     if (dfstm != null) {
-      assertEquals("Expected read statistic to be incremented", length, dfstm
-          .getReadStatistics().getTotalBytesRead() - totalRead);
+      if (isHedgedRead) {
+        assertTrue("Expected read statistic to be incremented", length <= dfstm
+            .getReadStatistics().getTotalBytesRead() - totalRead);
+      } else {
+        assertEquals("Expected read statistic to be incremented", length, dfstm
+            .getReadStatistics().getTotalBytesRead() - totalRead);
+      }
     }
   }
   
@@ -208,7 +224,7 @@ public class TestPread {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
   }
-  
+
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
     assertTrue(fileSys.delete(name, true));
@@ -249,6 +265,7 @@ public class TestPread {
    */
   @Test
   public void testHedgedPreadDFSBasic() throws IOException {
+    isHedgedRead = true;
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
     conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 1);
@@ -258,8 +275,83 @@ public class TestPread {
   }
 
   @Test
+  public void testHedgedReadLoopTooManyTimes() throws IOException {
+    Configuration conf = new Configuration();
+    int numHedgedReadPoolThreads = 5;
+    final int hedgedReadTimeoutMillis = 50;
+
+    conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
+        numHedgedReadPoolThreads);
+    conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
+        hedgedReadTimeoutMillis);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
+    // Set up the InjectionHandler
+    DFSClientFaultInjector.instance = Mockito
+        .mock(DFSClientFaultInjector.class);
+    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
+    final int sleepMs = 100;
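+    // First fetch: sleep past the hedged-read timeout, then throw a
+    // ChecksumException exactly once (exceptionNum guards the single throw).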
+    Mockito.doAnswer(new Answer<Void>() {
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        if (true) {
+          Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
+          if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
+            System.out.println("-------------- throw Checksum Exception");
+            throw new ChecksumException("ChecksumException test", 100);
+          }
+        }
+        return null;
+      }
+    }).when(injector).fetchFromDatanodeException();
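+    // Delay every datanode read as well, so each attempt triggers another
+    // hedged read; the test asserts the hedged-read loop runs exactly 3 times.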
+    Mockito.doAnswer(new Answer<Void>() {
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        if (true) {
+          Thread.sleep(sleepMs * 2);
+        }
+        return null;
+      }
+    }).when(injector).readFromDatanodeDelay();
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .format(true).build();
+    DistributedFileSystem fileSys = cluster.getFileSystem();
+    DFSClient dfsClient = fileSys.getClient();
+    FSDataOutputStream output = null;
+    DFSInputStream input = null;
+    String filename = "/hedgedReadMaxOut.dat";
+    try {
+      
+      Path file = new Path(filename);
+      output = fileSys.create(file, (short) 2);
+      byte[] data = new byte[64 * 1024];
+      output.write(data);
+      output.flush();
+      output.write(data);
+      output.flush();
+      output.write(data);
+      output.flush();
+      output.close();
+      byte[] buffer = new byte[64 * 1024];
+      input = dfsClient.open(filename);
+      input.read(0, buffer, 0, 1024);
+      input.close();
+      assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
+    } catch (BlockMissingException e) {
+      assertTrue(false);
+    } finally {
+      Mockito.reset(injector);
+      IOUtils.cleanup(null, input);
+      IOUtils.cleanup(null, output);
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  @Test
   public void testMaxOutHedgedReadPool() throws IOException,
       InterruptedException, ExecutionException {
+    isHedgedRead = true;
     Configuration conf = new Configuration();
     int numHedgedReadPoolThreads = 5;
     final int initialHedgedReadTimeoutMillis = 50000;
@@ -342,6 +434,8 @@ public class TestPread {
       throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
     conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
@@ -367,7 +461,6 @@ public class TestPread {
   public void testPreadDFSSimulated() throws IOException {
     simulatedStorage = true;
     testPreadDFS();
-    simulatedStorage = false;
   }
   
   /**

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java Sat Jul 12 02:24:40 2014
@@ -390,6 +390,10 @@ public class TestRollingUpgrade {
 
       // Once finalized, there should be no more fsimage for rollbacks.
       Assert.assertFalse(fsimage.hasRollbackFSImage());
+
+      // Should have no problem in restart and replaying edits that include
+      // the FINALIZE op.
+      dfsCluster.restartNameNode(0);
     } finally {
       if (cluster != null) {
         cluster.shutdown();

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Sat Jul 12 02:24:40 2014
@@ -370,8 +370,13 @@ public class TestBalancer {
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
-    assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
-
+    if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 
+        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
+      assertEquals(Balancer.ReturnStatus.NO_MOVE_PROGRESS.code, r);
+      return;
+    } else {
+      assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
+    }
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
     LOG.info("Rebalancing with default ctor.");
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
@@ -463,6 +468,20 @@ public class TestBalancer {
   }
   
   @Test(timeout=100000)
+  public void testBalancerWithZeroThreadsForMove() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 0);
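+    // With zero mover threads the balancer cannot make progress, so
+    // testBalancer1Internal expects ReturnStatus.NO_MOVE_PROGRESS (see above).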
+    testBalancer1Internal(conf);
+  }
+
+  @Test(timeout=100000)
+  public void testBalancerWithNonZeroThreadsForMove() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 8);
+    testBalancer1Internal(conf);
+  }
+  
+  @Test(timeout=100000)
   public void testBalancer2() throws Exception {
     testBalancer2Internal(new HdfsConfiguration());
   }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Sat Jul 12 02:24:40 2014
@@ -29,6 +29,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -42,6 +44,34 @@ public class TestBlockInfo {
   private static final Log LOG = LogFactory
       .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
 
+
+  @Test
+  public void testAddStorage() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
+
+    boolean added = blockInfo.addStorage(storage);
+
+    Assert.assertTrue(added);
+    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+  }
+
+
+  @Test
+  public void testReplaceStorageIfDifferentOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
+    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+
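+    // storage2 is on the same datanode as storage1, so adding it should
+    // replace storage1 in place rather than adding a second entry.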
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+  }
+
   @Test
   public void testBlockListMoveToHead() throws Exception {
     LOG.info("BlockInfo moveToHead tests...");

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Sat Jul 12 02:24:40 2014
@@ -209,6 +209,8 @@ public class TestBlockTokenWithDFS {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     return conf;
   }
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java Sat Jul 12 02:24:40 2014
@@ -55,7 +55,7 @@ public class TestPendingInvalidateBlock 
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
     // block deletion pending period
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_KEY, 1000 * 5);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, 5L);
     // set the block report interval to 2s
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 2000);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Sat Jul 12 02:24:40 2014
@@ -435,8 +435,9 @@ public class TestBPOfferService {
   }
 
   private ReceivedDeletedBlockInfo[] waitForBlockReceived(
-      ExtendedBlock fakeBlock,
-      DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+      final ExtendedBlock fakeBlock,
+      final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
+    final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
     final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
       ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -444,9 +445,9 @@ public class TestBPOfferService {
       @Override
       public Boolean get() {
         try {
-          Mockito.verify(mockNN1).blockReceivedAndDeleted(
+          Mockito.verify(mockNN).blockReceivedAndDeleted(
             Mockito.<DatanodeRegistration>anyObject(),
-            Mockito.eq(FAKE_BPID),
+            Mockito.eq(fakeBlockPoolId),
             captor.capture());
           return true;
         } catch (Throwable t) {

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Sat Jul 12 02:24:40 2014
@@ -160,7 +160,8 @@ public class TestDeleteBlockPool {
       conf.set(DFSConfigKeys.DFS_NAMESERVICES,
           "namesServerId1,namesServerId2");
       cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
         .numDataNodes(1).build();
 
       cluster.waitActive();

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Sat Jul 12 02:24:40 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached.  This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+          (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Sat Jul 12 02:24:40 2014
@@ -83,7 +83,7 @@ public class CreateEditsLog {
 
       final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, blocks, replication, blockSize);
-      inode.toUnderConstruction("", "", null);
+      inode.toUnderConstruction("", "");
 
      // Append path to filename with information about blockIDs 
       String path = "_" + iF + "_B" + blocks[0].getBlockId() + 
@@ -98,7 +98,7 @@ public class CreateEditsLog {
       }
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-      fileUc.toUnderConstruction("", "", null);
+      fileUc.toUnderConstruction("", "");
       editLog.logOpenFile(filePath, fileUc, false);
       editLog.logCloseFile(filePath, inode);
 

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Sat Jul 12 02:24:40 2014
@@ -32,12 +32,20 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.READ;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.After;
@@ -60,7 +68,7 @@ public class FSXAttrBaseTest {
   protected static MiniDFSCluster dfsCluster;
   protected static Configuration conf;
   private static int pathCount = 0;
-  private static Path path;
+  protected static Path path;
   
   // XAttrs
   protected static final String name1 = "user.a1";
@@ -73,10 +81,16 @@ public class FSXAttrBaseTest {
 
   protected FileSystem fs;
 
+  private static final UserGroupInformation BRUCE =
+      UserGroupInformation.createUserForTesting("bruce", new String[] { });
+  private static final UserGroupInformation DIANA =
+      UserGroupInformation.createUserForTesting("diana", new String[] { });
+
   @BeforeClass
   public static void init() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
     initCluster(true);
@@ -388,6 +402,21 @@ public class FSXAttrBaseTest {
     fs.removeXAttr(path, name3);
   }
 
+  @Test(timeout = 120000)
+  public void testRenameFileWithXAttr() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    Path renamePath = new Path(path.toString() + "-rename");
+    fs.rename(path, renamePath);
+    Map<String, byte[]> xattrs = fs.getXAttrs(renamePath);
+    Assert.assertEquals(2, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    fs.removeXAttr(renamePath, name1);
+    fs.removeXAttr(renamePath, name2);
+  }
+
   /**
    * Test the listXAttrs api.
    * listXAttrs on a path that doesn't exist.
@@ -535,6 +564,50 @@ public class FSXAttrBaseTest {
     Assert.assertArrayEquals(value1, xattrs.get(name1));
     Assert.assertArrayEquals(value2, xattrs.get(name2));
   }
+
+  @Test(timeout = 120000)
+  public void testXAttrAcl() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+    fs.setOwner(path, BRUCE.getUserName(), null);
+    FileSystem fsAsBruce = createFileSystem(BRUCE);
+    FileSystem fsAsDiana = createFileSystem(DIANA);
+    fsAsBruce.setXAttr(path, name1, value1);
+
+    Map<String, byte[]> xattrs;
+    try {
+      xattrs = fsAsDiana.getXAttrs(path);
+      Assert.fail("Diana should not have read access to get xattrs");
+    } catch (AccessControlException e) {
+      // Ignore
+    }
+
+    // Give Diana read permissions to the path
+    fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
+        aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
+    xattrs = fsAsDiana.getXAttrs(path);
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+    try {
+      fsAsDiana.removeXAttr(path, name1);
+      Assert.fail("Diana should not have write access to remove xattrs");
+    } catch (AccessControlException e) {
+      // Ignore
+    }
+
+    try {
+      fsAsDiana.setXAttr(path, name2, value2);
+      Assert.fail("Diana should not have write access to set xattrs");
+    } catch (AccessControlException e) {
+      // Ignore
+    }
+
+    fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
+        aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
+    fsAsDiana.setXAttr(path, name2, value2);
+    Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
+    fsAsDiana.removeXAttr(path, name1);
+    fsAsDiana.removeXAttr(path, name2);
+  }
   
   /**
    * Creates a FileSystem for the super-user.
@@ -545,6 +618,18 @@ public class FSXAttrBaseTest {
   protected FileSystem createFileSystem() throws Exception {
     return dfsCluster.getFileSystem();
   }
+
+  /**
+   * Creates a FileSystem for a specific user.
+   *
+   * @param user UserGroupInformation specific user
+   * @return FileSystem for specific user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem(UserGroupInformation user)
+      throws Exception {
+    return DFSTestUtil.getFileSystemAs(user, conf);
+  }
   
   /**
    * Initializes all FileSystem instances used in the tests.

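The new tests pin down that xattr access follows the usual HDFS permission
checks: reading xattrs needs READ on the inode, setting or removing them needs
WRITE, and ACL entries can grant either. A minimal sketch of the per-user
FileSystem pattern they rely on (assumes a running MiniDFSCluster and its
Configuration in conf):

    UserGroupInformation diana =
        UserGroupInformation.createUserForTesting("diana", new String[] { });
    FileSystem fsAsDiana = DFSTestUtil.getFileSystemAs(diana, conf);
    try {
      fsAsDiana.getXAttrs(path);    // no READ access yet
      Assert.fail("expected AccessControlException");
    } catch (AccessControlException e) {
      // denied until an ACL entry grants diana READ
    }
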
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Sat Jul 12 02:24:40 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /**
  * This is a utility class to expose NameNode functionality for unit tests.
@@ -177,8 +178,9 @@ public class NameNodeAdapter {
   }
 
   public static FSImage spyOnFsImage(NameNode nn1) {
-    FSImage spy = Mockito.spy(nn1.getNamesystem().dir.fsImage);
-    nn1.getNamesystem().dir.fsImage = spy;
+    FSNamesystem fsn = nn1.getNamesystem();
+    FSImage spy = Mockito.spy(fsn.getFSImage());
+    Whitebox.setInternalState(fsn, "fsImage", spy);
     return spy;
   }
   

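Since the FSImage can no longer be swapped in through FSDirectory, the spy is
injected reflectively; note that the "fsImage" string must match FSNamesystem's
private field name exactly, so a rename of that field breaks this silently. A
sketch of typical use (the verify() call is illustrative only):

    FSImage spy = NameNodeAdapter.spyOnFsImage(cluster.getNameNode());
    // ... drive the NameNode, e.g. trigger a checkpoint ...
    Mockito.verify(spy, Mockito.atLeastOnce()).getStorage();
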
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Sat Jul 12 02:24:40 2014
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Date;
 import java.util.EnumSet;
 import java.util.Iterator;
@@ -682,6 +683,12 @@ public class TestCacheDirectives {
         } finally {
           namesystem.readUnlock();
         }
+
+        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
+            " / " + expectedCachedBlocks + ".  " +
+            "cached replicas: have " + numCachedReplicas +
+            " / " + expectedCachedReplicas);
+
         if (expectedCachedBlocks == -1 ||
             numCachedBlocks == expectedCachedBlocks) {
           if (expectedCachedReplicas == -1 ||
@@ -689,10 +696,6 @@ public class TestCacheDirectives {
             return true;
           }
         }
-        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
-            " / " + expectedCachedBlocks + ".  " +
-            "cached replicas: have " + numCachedReplicas +
-            " / " + expectedCachedReplicas);
         return false;
       }
     }, 500, 60000);
@@ -1415,7 +1418,10 @@ public class TestCacheDirectives {
       for (DataNode dn : cluster.getDataNodes()) {
         DatanodeDescriptor descriptor =
             datanodeManager.getDatanode(dn.getDatanodeId());
-        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+        Assert.assertTrue("Pending cached list of " + descriptor +
+                " is not empty, "
+                + Arrays.toString(descriptor.getPendingCached().toArray()), 
+            descriptor.getPendingCached().isEmpty());
       }
     } finally {
       cluster.getNamesystem().readUnlock();
@@ -1430,10 +1436,6 @@ public class TestCacheDirectives {
     int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
     DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
         0xFADED);
-    // Set up a log appender watcher
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
     dfs.addCachePool(new CachePoolInfo("pool"));
     dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
         .setPath(fileName).setReplication((short) 1).build());

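Moving the LOG.info() ahead of the early return makes every poll report
progress, not just the polls that fail. For reference, the contract here is
GenericTestUtils.waitFor(supplier, 500, 60000): re-evaluate the Guava
Supplier<Boolean> every 500 ms and time out after 60 s. A stripped-down sketch
(the counting helper is hypothetical):

    final int expected = expectedCachedBlocks;  // from the enclosing test
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        int current = countCachedBlocks();      // hypothetical helper
        LOG.info("cached blocks: have " + current + " / " + expected);
        return current == expected;
      }
    }, 500, 60000);
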
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Sat Jul 12 02:24:40 2014
@@ -1334,7 +1334,8 @@ public class TestCheckpoint {
     SecondaryNameNode secondary2 = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf)
-          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+          .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
+              conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
           .build();
       Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
       Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));

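The federated topology is now derived from the configured nameservice IDs
rather than a hard-coded count of two, so the topology matches whatever
DFS_NAMESERVICES already holds. Sketch of the wiring (assuming a
comma-separated list such as "ns1,ns2"):

    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .build();
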
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Sat Jul 12 02:24:40 2014
@@ -32,7 +32,6 @@ import java.io.IOException;
 
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.*;
 
 /**
@@ -50,6 +49,7 @@ public class TestCommitBlockSynchronizat
     final DatanodeStorageInfo[] targets = {};
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
+    namesystem.setImageLoaded(true);
     FSNamesystem namesystemSpy = spy(namesystem);
     BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
         block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java Sat Jul 12 02:24:40 2014
@@ -146,4 +146,59 @@ public class TestDeleteRace {
       }
     }
   }
+
+  private class RenameThread extends Thread {
+    private FileSystem fs;
+    private Path from;
+    private Path to;
+
+    RenameThread(FileSystem fs, Path from, Path to) {
+      this.fs = fs;
+      this.from = from;
+      this.to = to;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Thread.sleep(1000);
+        LOG.info("Renaming " + from + " to " + to);
+
+        fs.rename(from, to);
+        LOG.info("Renamed " + from + " to " + to);
+      } catch (Exception e) {
+        LOG.info(e);
+      }
+    }
+  }
+
+  @Test
+  public void testRenameRace() throws Exception {
+    try {
+      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+          SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fs = cluster.getFileSystem();
+      Path dirPath1 = new Path("/testRenameRace1");
+      Path dirPath2 = new Path("/testRenameRace2");
+      Path filePath = new Path("/testRenameRace1/file1");
+
+      fs.mkdirs(dirPath1);
+      FSDataOutputStream out = fs.create(filePath);
+      Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
+      renameThread.start();
+
+      // write data and close to make sure a block is allocated.
+      out.write(new byte[32], 0, 32);
+      out.close();
+
+      // Restart the NameNode so that it replays the edit log. If the old
+      // path had been logged in an edit, the NameNode would fail to come up.
+      cluster.restartNameNode(0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java Sat Jul 12 02:24:40 2014
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,7 +38,9 @@ import org.junit.Test;
 
 public class TestDiskspaceQuotaUpdate {
   private static final int BLOCKSIZE = 1024;
-  private static final short REPLICATION = 1;
+  private static final short REPLICATION = 4;
+  static final long seed = 0L;
+  private static final Path dir = new Path("/TestQuotaUpdate");
 
   private Configuration conf;
   private MiniDFSCluster cluster;
@@ -63,41 +67,83 @@ public class TestDiskspaceQuotaUpdate {
   }
 
   /**
+   * Test if the quota can be correctly updated for file creation
+   */
+  @Test (timeout=60000)
+  public void testQuotaUpdateWithFileCreate() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile = new Path(foo, "created_file.data");
+    dfs.mkdirs(foo);
+    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
+    long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
+    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
+        fileLen, BLOCKSIZE, REPLICATION, seed);
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(fnode.isQuotaSet());
+    Quota.Counts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
+        .getSpaceConsumed();
+    assertEquals(2, cnt.get(Quota.NAMESPACE));
+    assertEquals(fileLen * REPLICATION, cnt.get(Quota.DISKSPACE));
+  }
+
+  /**
    * Test if the quota can be correctly updated for append
    */
-  @Test
+  @Test (timeout=60000)
   public void testUpdateQuotaForAppend() throws Exception {
-    final Path foo = new Path("/foo");
+    final Path foo = new Path(dir, "foo");
     final Path bar = new Path(foo, "bar");
-    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
+    long currentFileLen = BLOCKSIZE;
+    DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
     dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
 
-    // append half of the block data
+    // append half of the block data; the previous file length is at a
+    // block boundary
     DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
+    currentFileLen += (BLOCKSIZE / 2);
 
     INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
+    assertTrue(fooNode.isQuotaSet());
     Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     long ns = quota.get(Quota.NAMESPACE);
     long ds = quota.get(Quota.DISKSPACE);
     assertEquals(2, ns); // foo and bar
-    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
+    assertEquals(currentFileLen * REPLICATION, ds);
+    ContentSummary c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
 
-    // append another block
+    // append another block; the previous file length is not at a block boundary
     DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
+    currentFileLen += BLOCKSIZE;
 
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.get(Quota.NAMESPACE);
     ds = quota.get(Quota.DISKSPACE);
     assertEquals(2, ns); // foo and bar
-    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
+    assertEquals(currentFileLen * REPLICATION, ds);
+    c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
+
+    // append several blocks
+    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
+    currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
+
+    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
+    ns = quota.get(Quota.NAMESPACE);
+    ds = quota.get(Quota.DISKSPACE);
+    assertEquals(2, ns); // foo and bar
+    assertEquals(currentFileLen * REPLICATION, ds);
+    c = dfs.getContentSummary(foo);
+    assertEquals(c.getSpaceConsumed(), ds);
   }
 
   /**
    * Test if the quota can be correctly updated when file length is updated
    * through fsync
    */
-  @Test
+  @Test (timeout=60000)
   public void testUpdateQuotaForFSync() throws Exception {
     final Path foo = new Path("/foo");
     final Path bar = new Path(foo, "bar");

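Worked example of the accounting these tests assert, using the constants above
(BLOCKSIZE = 1024, REPLICATION = 4):

    fileLen   = 2 * BLOCKSIZE + BLOCKSIZE / 2 = 2560 bytes
    diskspace = fileLen * REPLICATION         = 2560 * 4 = 10240 bytes
    namespace = 2 inodes (directory "foo" plus its one file)

getContentSummary(foo).getSpaceConsumed() is checked against the same
diskspace figure after each append, so quota tracking and the content summary
must agree whether an append starts on or off a block boundary.
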
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Sat Jul 12 02:24:40 2014
@@ -195,7 +195,7 @@ public class TestEditLog {
       for (int i = 0; i < numTransactions; i++) {
         INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
             p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-        inode.toUnderConstruction("", "", null);
+        inode.toUnderConstruction("", "");
 
         editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);

Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Sat Jul 12 02:24:40 2014
@@ -24,7 +24,9 @@ import java.io.IOException;
 import java.io.StringReader;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Random;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +38,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -45,6 +46,11 @@ import org.junit.Test;
 
 import com.google.common.collect.Lists;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -74,6 +80,10 @@ public class TestFSDirectory {
 
   private DistributedFileSystem hdfs;
 
+  private static final int numGeneratedXAttrs = 256;
+  private static final ImmutableList<XAttr> generatedXAttrs =
+      ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs));
+
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
@@ -119,25 +129,15 @@ public class TestFSDirectory {
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
       if (!line.isEmpty() && !line.contains("snapshot")) {
-        Assert.assertTrue("line=" + line,
+        assertTrue("line=" + line,
             line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
-            || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
+                || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
         checkClassName(line);
       }
     }
   }
   
   @Test
-  public void testReset() throws Exception {
-    fsdir.reset();
-    Assert.assertFalse(fsdir.isReady());
-    final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
-    Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
-    fsdir.imageLoadComplete();
-    Assert.assertTrue(fsdir.isReady());
-  }
-
-  @Test
   public void testSkipQuotaCheck() throws Exception {
     try {
       // set quota. nsQuota of 1 means no files can be created
@@ -176,7 +177,7 @@ public class TestFSDirectory {
     int i = line.lastIndexOf('(');
     int j = line.lastIndexOf('@');
     final String classname = line.substring(i+1, j);
-    Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
+    assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
   
@@ -193,22 +194,185 @@ public class TestFSDirectory {
     // Adding a system namespace xAttr, isn't affected by inode xAttrs limit.
     XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
         setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
-    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr, 
+    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(1);
+    newXAttrs.add(newXAttr);
+    List<XAttr> xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
         EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-    Assert.assertEquals(xAttrs.size(), 3);
+    assertEquals(3, xAttrs.size());
     
     // Adding a trusted namespace xAttr, is affected by inode xAttrs limit.
     XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
         XAttr.NameSpace.TRUSTED).setName("a4").
         setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    newXAttrs.set(0, newXAttr1);
     try {
-      fsdir.setINodeXAttr(existingXAttrs, newXAttr1, 
+      fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
           EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-      Assert.fail("Setting user visable xattr on inode should fail if " +
+      fail("Setting user visible xattr on inode should fail if " +
           "reaching limit.");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
           "to inode, would exceed limit", e);
     }
   }
+
+  /**
+   * Verify that the first <i>num</i> generatedXAttrs are present in
+   * newXAttrs.
+   */
+  private static void verifyXAttrsPresent(List<XAttr> newXAttrs,
+      final int num) {
+    assertEquals("Unexpected number of XAttrs after multiset", num,
+        newXAttrs.size());
+    for (int i=0; i<num; i++) {
+      XAttr search = generatedXAttrs.get(i);
+      assertTrue("Did not find set XAttr " + search + " + after multiset",
+          newXAttrs.contains(search));
+    }
+  }
+
+  private static List<XAttr> generateXAttrs(final int numXAttrs) {
+    List<XAttr> generatedXAttrs = Lists.newArrayListWithCapacity(numXAttrs);
+    for (int i=0; i<numXAttrs; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) i, (byte) (i + 1), (byte) (i + 2) })
+          .build();
+      generatedXAttrs.add(xAttr);
+    }
+    return generatedXAttrs;
+  }
+
+  /**
+   * Test setting and removing multiple xattrs via single operations
+   */
+  @Test(timeout=300000)
+  public void testXAttrMultiSetRemove() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(0);
+
+    // Keep adding a random number of xattrs and verifying until exhausted
+    final Random rand = new Random(0xFEEDA);
+    int numExpectedXAttrs = 0;
+    while (numExpectedXAttrs < numGeneratedXAttrs) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToAdd = rand.nextInt(5)+1;
+
+      List<XAttr> toAdd = Lists.newArrayListWithCapacity(numToAdd);
+      for (int i = 0; i < numToAdd; i++) {
+        if (numExpectedXAttrs >= numGeneratedXAttrs) {
+          break;
+        }
+        toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
+        numExpectedXAttrs++;
+      }
+      LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
+      for (int i = 0; i < toAdd.size(); i++) {
+        LOG.info("Will add XAttr " + toAdd.get(i));
+      }
+      List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+
+    // Keep removing a random number of xattrs and verifying until all gone
+    while (numExpectedXAttrs > 0) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToRemove = rand.nextInt(5)+1;
+      List<XAttr> toRemove = Lists.newArrayListWithCapacity(numToRemove);
+      for (int i = 0; i < numToRemove; i++) {
+        if (numExpectedXAttrs == 0) {
+          break;
+        }
+        toRemove.add(generatedXAttrs.get(numExpectedXAttrs-1));
+        numExpectedXAttrs--;
+      }
+      final int expectedNumToRemove = toRemove.size();
+      LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
+      List<XAttr> removedXAttrs = Lists.newArrayList();
+      List<XAttr> newXAttrs = fsdir.filterINodeXAttrs(existingXAttrs,
+          toRemove, removedXAttrs);
+      assertEquals("Unexpected number of removed XAttrs",
+          expectedNumToRemove, removedXAttrs.size());
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testXAttrMultiAddRemoveErrors() throws Exception {
+
+    // Test that the same XAttr can not be multiset twice
+    List<XAttr> existingXAttrs = Lists.newArrayList();
+    List<XAttr> toAdd = Lists.newArrayList();
+    toAdd.add(generatedXAttrs.get(0));
+    toAdd.add(generatedXAttrs.get(1));
+    toAdd.add(generatedXAttrs.get(2));
+    toAdd.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      fail("Specified the same xattr to be set twice");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot specify the same " +
+          "XAttr to be set", e);
+    }
+
+    // Test that CREATE and REPLACE flags are obeyed
+    toAdd.remove(generatedXAttrs.get(0));
+    existingXAttrs.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      fail("Set XAttr that is already set without REPLACE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("already exists", e);
+    }
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.REPLACE));
+      fail("Set XAttr that does not exist without the CREATE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("does not exist", e);
+    }
+
+    // Sanity test for CREATE
+    toAdd.remove(generatedXAttrs.get(0));
+    List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE));
+    assertEquals("Unexpected toAdd size", 2, toAdd.size());
+    for (XAttr x : toAdd) {
+      assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x));
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<3; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) (i*2) })
+          .build();
+      toAdd.add(xAttr);
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size());
+    for (int i=0; i<3; i++) {
+      assertArrayEquals("Unexpected XAttr value",
+          new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue());
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for CREATE+REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<4; i++) {
+      toAdd.add(generatedXAttrs.get(i));
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    verifyXAttrsPresent(newXAttrs, 4);
+  }
 }

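Taken together, the new tests pin down the multiset flag semantics that the
error messages above spell out: CREATE fails with "already exists" when the
name is present, REPLACE fails with "does not exist" when it is absent,
CREATE|REPLACE sets unconditionally, and naming the same xattr twice in one
call is rejected outright. A condensed sketch of the call under test:

    // existingXAttrs: current xattrs; toAdd: xattrs to set in one operation
    List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
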
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Sat Jul 12 02:24:40 2014
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
@@ -194,8 +193,8 @@ public class TestFSImageWithSnapshot {
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
     
-    INodeDirectorySnapshottable rootNode = 
-        (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
+    INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
+        .asDirectory();
     assertTrue("The children list of root should be empty", 
         rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     // one snapshot on root: s1


