hadoop-hdfs-commits mailing list archives

From: cnaur...@apache.org
Subject: svn commit: r1572325 [3/3] - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ ...
Date: Wed, 26 Feb 2014 23:02:02 GMT
Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Wed Feb 26 23:01:58 2014
@@ -752,6 +752,148 @@ Content-Length: 0
    {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes
 
 
+** {Modify ACL Entries}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MODIFYACLENTRIES
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
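+
+  For example, a request granting user <<<charlie>>> read/write access might
+  look like the following (the host, port, path, and ACL spec are
+  illustrative placeholders, not output from a real cluster):
+
++---------------------------------
+curl -i -X PUT "http://namenode.example.com:50070/webhdfs/v1/tmp/data?op=MODIFYACLENTRIES
+                              &aclspec=user:charlie:rw-"
++---------------------------------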
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.modifyAclEntries
+
+
+** {Remove ACL Entries}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACLENTRIES
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
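+
+  For example, a request removing the access entry for user <<<charlie>>>
+  might look like the following (names are illustrative; a removal spec names
+  the entries to drop and omits the permission field, as with
+  <<<hdfs dfs -setfacl -x>>>):
+
++---------------------------------
+curl -i -X PUT "http://namenode.example.com:50070/webhdfs/v1/tmp/data?op=REMOVEACLENTRIES
+                              &aclspec=user:charlie"
++---------------------------------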
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAclEntries
+
+
+** {Remove Default ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEDEFAULTACL"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeDefaultAcl
+
+
+** {Remove ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACL"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
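+
+  Note that <<<REMOVEACL>>> removes all extended ACL entries, leaving only
+  the base entries implied by the permission bits.  A hypothetical request
+  (host and path are placeholders):
+
++---------------------------------
+curl -i -X PUT "http://namenode.example.com:50070/webhdfs/v1/tmp/data?op=REMOVEACL"
++---------------------------------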
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAcl
+
+
+** {Set ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETACL
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
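+
+  Unlike <<<MODIFYACLENTRIES>>>, <<<SETACL>>> replaces the entire ACL, so
+  the spec must include the base user, group, and other entries.  A
+  hypothetical request (host, path, and names are illustrative):
+
++---------------------------------
+curl -i -X PUT "http://namenode.example.com:50070/webhdfs/v1/tmp/data?op=SETACL
+                              &aclspec=user::rwx,user:charlie:r--,group::r--,mask::r--,other::---"
++---------------------------------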
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setAcl
+
+
+** {Get ACL Status}
+
+  * Submit an HTTP GET request.
+
++---------------------------------
+curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
++---------------------------------
+
+  The client receives a response with a {{{ACL Status JSON Schema}<<<AclStatus>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "AclStatus": {
+        "entries": [
+            "user:carla:rw-", 
+            "group::r-x"
+        ], 
+        "group": "supergroup", 
+        "owner": "hadoop", 
+        "stickyBit": false
+    }
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
+
+
 * {Delegation Token Operations}
 
 ** {Get Delegation Token}
@@ -980,6 +1122,52 @@ Transfer-Encoding: chunked
   However, if additional properties are included in the responses, they are
   considered as optional properties in order to maintain compatibility.
 
+** {ACL Status JSON Schema}
+
++---------------------------------
+{
+  "name"      : "AclStatus",
+  "properties":
+  {
+    "AclStatus":
+    {
+      "type"      : "object",
+      "properties":
+      {
+        "entries":
+        {
+          "type": "array"
+          "items":
+          {
+            "description": "ACL entry.",
+            "type": "string"
+          }
+        },
+        "group":
+        {
+          "description": "The group owner.",
+          "type"       : "string",
+          "required"   : true
+        },
+        "owner":
+        {
+          "description": "The user who is the owner.",
+          "type"       : "string",
+          "required"   : true
+        },
+        "stickyBit":
+        {
+          "description": "True if the sticky bit is on.",
+          "type"       : "boolean",
+          "required"   : true
+        }
+      }
+    }
+  }
+}
++---------------------------------
+
+
 ** {Boolean JSON Schema}
 
 +---------------------------------
@@ -1387,6 +1575,23 @@ var tokenProperties =
 
 * {HTTP Query Parameter Dictionary}
 
+** {ACL Spec}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<aclspec>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The ACL spec included in ACL modification operations. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
+*----------------+-------------------------------------------------------------------+
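+
+  For illustration, a few hypothetical <<<aclspec>>> values (the same
+  comma-separated entry syntax accepted by <<<hdfs dfs -setfacl>>>):
+
++---------------------------------
+user::rwx,group::r-x,other::r--        # base entries only
+user:charlie:rw-,group:sales:r-x       # named user and group entries
+default:user:charlie:rwx               # default (inherited) entry
++---------------------------------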
+
+
 ** {Access Time}
 
 *----------------+-------------------------------------------------------------------+

Propchange: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1547224-1569863

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Feb 26 23:01:58 2014
@@ -17,15 +17,21 @@
  */
 package org.apache.hadoop.fs.permission;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestStickyBit {
@@ -43,56 +53,89 @@ public class TestStickyBit {
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
   static UserGroupInformation user2 = 
     UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
-  
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static FileSystem hdfs;
+  private static FileSystem hdfsAsUser1;
+  private static FileSystem hdfsAsUser2;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    initCluster(true);
+  }
+
+  private static void initCluster(boolean format) throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
+      .build();
+    hdfs = cluster.getFileSystem();
+    assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
+    assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
+    hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
+    assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    if (hdfs != null) {
+      for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
+        hdfs.delete(stat.getPath(), true);
+      }
+    }
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Ensure that even if a file is in a directory with the sticky bit on,
    * another user can write to that file (assuming correct permissions).
    */
-  private void confirmCanAppend(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    // Create a tmp directory with wide-open permissions and sticky bit
-    Path p = new Path(baseDir, "tmp");
-
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmCanAppend(Configuration conf, Path p) throws Exception {
     // Write a file to the new tmp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
-    hdfs.setPermission(file, new FsPermission((short) 0777));
+    writeFile(hdfsAsUser1, file);
+    hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
 
     // Log onto cluster as another user and attempt to append to file
-    hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
     Path file2 = new Path(p, "foo");
-    FSDataOutputStream h = hdfs.append(file2);
-    h.write("Some more data".getBytes());
-    h.close();
+    FSDataOutputStream h = null;
+    try {
+      h = hdfsAsUser2.append(file2);
+      h.write("Some more data".getBytes());
+      h.close();
+      h = null;
+    } finally {
+      IOUtils.cleanup(null, h);
+    }
   }
 
   /**
    * Test that one user can't delete another user's file when the sticky bit is
    * set.
    */
-  private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    Path p = new Path(baseDir, "contemporary");
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmDeletingFiles(Configuration conf, Path p)
+      throws Exception {
     // Write a file to the new temp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
+    writeFile(hdfsAsUser1, file);
 
     // Make sure the correct user is the owner
-    assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner());
+    assertEquals(user1.getShortUserName(),
+      hdfsAsUser1.getFileStatus(file).getOwner());
 
     // Log onto cluster as another user and attempt to delete the file
-    FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
-
     try {
-      hdfs2.delete(file, false);
+      hdfsAsUser2.delete(file, false);
       fail("Shouldn't be able to delete someone else's file with SB on");
     } catch (IOException ioe) {
       assertTrue(ioe instanceof AccessControlException);
@@ -105,13 +148,8 @@ public class TestStickyBit {
    * on, the new directory does not automatically get a sticky bit, as is
    * standard Unix behavior
    */
-  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
+  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
       throws IOException {
-    Path p = new Path(baseDir, "scissorsisters");
-
-    // Turn on its sticky bit
-    hdfs.mkdirs(p, new FsPermission((short) 01666));
-
     // Create a subdirectory within it
     Path p2 = new Path(p, "bar");
     hdfs.mkdirs(p2);
@@ -123,23 +161,19 @@ public class TestStickyBit {
   /**
    * Test basic ability to get and set sticky bits on files and directories.
    */
-  private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
+  private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
       throws IOException {
-    Path p1 = new Path(baseDir, "roguetraders");
-
-    hdfs.mkdirs(p1);
-
     // Initially sticky bit should not be set
-    assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Same permission, but with sticky bit on
     short withSB;
-    withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);
+    withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
 
     assertTrue((new FsPermission(withSB)).getStickyBit());
 
-    hdfs.setPermission(p1, new FsPermission(withSB));
-    assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    hdfs.setPermission(p, new FsPermission(withSB));
+    assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Write a file to the fs, try to set its sticky bit
     Path f = new Path(baseDir, "somefile");
@@ -154,37 +188,78 @@ public class TestStickyBit {
   }
 
   @Test
-  public void testGeneralSBBehavior() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+  public void testGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
+
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+
+    confirmCanAppend(conf, p);
 
-      FileSystem hdfs = cluster.getFileSystem();
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
 
-      assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfs.mkdirs(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
 
-      Path baseDir = new Path("/mcgann");
-      hdfs.mkdirs(baseDir);
-      confirmCanAppend(conf, hdfs, baseDir);
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    confirmDeletingFiles(conf, p);
 
-      baseDir = new Path("/eccleston");
-      hdfs.mkdirs(baseDir);
-      confirmSettingAndGetting(hdfs, baseDir);
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
 
-      baseDir = new Path("/tennant");
-      hdfs.mkdirs(baseDir);
-      confirmDeletingFiles(conf, hdfs, baseDir);
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  }
 
-      baseDir = new Path("/smith");
-      hdfs.mkdirs(baseDir);
-      confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  @Test
+  public void testAclGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmCanAppend(conf, p);
+
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
+
+    hdfs.mkdirs(p);
+    applyAcl(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
+
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmDeletingFiles(conf, p);
+
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
+
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    applyAcl(p);
+    confirmStickyBitDoesntPropagate(hdfs, p);
   }
 
   /**
@@ -192,46 +267,42 @@ public class TestStickyBit {
    * bit is set.
    */
   @Test
-  public void testMovingFiles() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
+  public void testMovingFiles() throws Exception {
+    testMovingFiles(false);
+  }
+
+  @Test
+  public void testAclMovingFiles() throws Exception {
+    testMovingFiles(true);
+  }
+
+  private void testMovingFiles(boolean useAcl) throws Exception {
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path tmpPath = new Path("/tmp");
+    Path tmpPath2 = new Path("/tmp2");
+    hdfs.mkdirs(tmpPath);
+    hdfs.mkdirs(tmpPath2);
+    hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath);
+    }
+    hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath2);
+    }
+
+    // Write a file to the new tmp directory as a regular user
+    Path file = new Path(tmpPath, "foo");
 
+    writeFile(hdfsAsUser1, file);
+
+    // Log onto cluster as another user and attempt to move the file
     try {
-      // Set up cluster for testing
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // Create a tmp directory with wide-open permissions and sticky bit
-      Path tmpPath = new Path("/tmp");
-      Path tmpPath2 = new Path("/tmp2");
-      hdfs.mkdirs(tmpPath);
-      hdfs.mkdirs(tmpPath2);
-      hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
-      hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
-
-      // Write a file to the new tmp directory as a regular user
-      Path file = new Path(tmpPath, "foo");
-
-      FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
-
-      writeFile(hdfs2, file);
-
-      // Log onto cluster as another user and attempt to move the file
-      FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
-
-      try {
-        hdfs3.rename(file, new Path(tmpPath2, "renamed"));
-        fail("Shouldn't be able to rename someone else's file with SB on");
-      } catch (IOException ioe) {
-        assertTrue(ioe instanceof AccessControlException);
-        assertTrue(ioe.getMessage().contains("sticky bit"));
-      }
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
+      hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
+      fail("Shouldn't be able to rename someone else's file with SB on");
+    } catch (IOException ioe) {
+      assertTrue(ioe instanceof AccessControlException);
+      assertTrue(ioe.getMessage().contains("sticky bit"));
     }
   }
 
@@ -241,56 +312,91 @@ public class TestStickyBit {
    * re-start.
    */
   @Test
-  public void testStickyBitPersistence() throws IOException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // A tale of three directories...
-      Path sbSet = new Path("/Housemartins");
-      Path sbNotSpecified = new Path("/INXS");
-      Path sbSetOff = new Path("/Easyworld");
-
-      for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
-        hdfs.mkdirs(p);
-
-      // Two directories had there sticky bits set explicitly...
-      hdfs.setPermission(sbSet, new FsPermission((short) 01777));
-      hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+  public void testStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
 
-      cluster.shutdown();
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
 
-      // Start file system up again
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
-      hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs.exists(sbSet));
-      assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
-
-      assertTrue(hdfs.exists(sbNotSpecified));
-      assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
-          .getStickyBit());
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
 
-      assertTrue(hdfs.exists(sbSetOff));
-      assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+    shutdown();
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+  }
+
+  @Test
+  public void testAclStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
+
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
+
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    applyAcl(sbSet);
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+    applyAcl(sbSetOff);
+
+    shutdown();
+
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
   }
 
   /***
    * Write a quick file to the specified file system at specified path
    */
   static private void writeFile(FileSystem hdfs, Path p) throws IOException {
-    FSDataOutputStream o = hdfs.create(p);
-    o.write("some file contents".getBytes());
-    o.close();
+    FSDataOutputStream o = null;
+    try {
+      o = hdfs.create(p);
+      o.write("some file contents".getBytes());
+      o.close();
+      o = null;
+    } finally {
+      IOUtils.cleanup(null, o);
+    }
+  }
+
+  /**
+   * Applies an ACL (both access and default) to the given path.
+   *
+   * @param p Path to set
+   * @throws IOException if an ACL could not be modified
+   */
+  private static void applyAcl(Path p) throws IOException {
+    hdfs.modifyAclEntries(p, Arrays.asList(
+      aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
+      aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
   }
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Feb 26 23:01:58 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -1095,6 +1097,8 @@ public class DFSTestUtil {
     filesystem.removeCacheDirective(id);
     // OP_REMOVE_CACHE_POOL
     filesystem.removeCachePool("pool1");
+    // OP_SET_ACL
+    filesystem.setAcl(pathConcatTarget, Lists.<AclEntry> newArrayList());
   }
 
   public static void abortStream(DFSOutputStream out) throws IOException {

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Wed Feb 26 23:01:58 2014
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -65,6 +66,7 @@ public class TestSafeMode {
   public void startUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();      
     fs = cluster.getFileSystem();
@@ -328,12 +330,48 @@ public class TestSafeMode {
         fs.setTimes(file1, 0, 0);
       }});
 
+    runFsFun("modifyAclEntries while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
+      }});
+
+    runFsFun("removeAclEntries while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
+      }});
+
+    runFsFun("removeDefaultAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeDefaultAcl(file1);
+      }});
+
+    runFsFun("removeAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeAcl(file1);
+      }});
+
+    runFsFun("setAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.setAcl(file1, Lists.<AclEntry>newArrayList());
+      }});
+
     try {
       DFSTestUtil.readFile(fs, file1);
     } catch (IOException ioe) {
       fail("Set times failed while in SM");
     }
 
+    try {
+      fs.getAclStatus(file1);
+    } catch (IOException ioe) {
+      fail("getAclStatus failed while in SM");
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Feb 26 23:01:58 2014
@@ -26,6 +26,11 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -68,6 +73,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
+import org.junit.Assert;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -580,4 +586,39 @@ public class TestPBHelper {
     assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
         HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
   }
+
+  @Test
+  public void testAclEntryProto() {
+    // All fields populated.
+    AclEntry e1 = new AclEntry.Builder().setName("test")
+        .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
+        .setType(AclEntryType.OTHER).build();
+    // No name.
+    AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
+    // No permission, which will default to the 0th enum element.
+    AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER).setName("test").build();
+    AclEntry[] expected = new AclEntry[] { e1, e2,
+        new AclEntry.Builder()
+            .setScope(e3.getScope())
+            .setType(e3.getType())
+            .setName(e3.getName())
+            .setPermission(FsAction.NONE)
+            .build() };
+    AclEntry[] actual = Lists.newArrayList(
+        PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists
+            .newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
+    Assert.assertArrayEquals(expected, actual);
+  }
+
+  @Test
+  public void testAclStatusProto() {
+    AclEntry e = new AclEntry.Builder().setName("test")
+        .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
+        .setType(AclEntryType.OTHER).build();
+    AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
+        .build();
+    Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
+  }
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Wed Feb 26 23:01:58 2014
@@ -96,6 +96,7 @@ public class OfflineEditsViewerHelper {
       "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster =
       new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
     cluster.waitClusterUp();

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Feb 26 23:01:58 2014
@@ -91,6 +91,7 @@ public class TestNamenodeRetryCache {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     namesystem = cluster.getNamesystem();

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Feb 26 23:01:58 2014
@@ -125,6 +125,7 @@ public class TestRetryCacheWithHA {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java Wed Feb 26 23:01:58 2014
@@ -305,7 +305,8 @@ public class TestDiff {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
-    final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
+    final INodeDirectory newinode = new INodeDirectory(oldinode, false,
+      oldinode.getFeatures());
     newinode.setModificationTime(oldinode.getModificationTime() + 1);
 
     current.set(i, newinode);

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Wed Feb 26 23:01:58 2014
@@ -17,11 +17,19 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
+
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -32,6 +40,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Lists;
+
 public class TestJsonUtil {
   static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
     return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
@@ -135,6 +145,47 @@ public class TestJsonUtil {
     response.put("ipAddr", "127.0.0.1");
     checkDecodeFailure(response);
   }
+  
+  @Test
+  public void testToAclStatus() {
+    String jsonString =
+        "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
+    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+
+    List<AclEntry> aclSpec =
+        Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
+            aclEntry(ACCESS, USER, "user1", READ_WRITE),
+            aclEntry(ACCESS, GROUP, READ_WRITE),
+            aclEntry(ACCESS, OTHER, READ_EXECUTE));
+
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
+    aclStatusBuilder.owner("testuser");
+    aclStatusBuilder.group("supergroup");
+    aclStatusBuilder.addEntries(aclSpec);
+    aclStatusBuilder.stickyBit(false);
+
+    Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
+        JsonUtil.toAclStatus(json));
+  }
+
+  @Test
+  public void testToJsonFromAclStatus() {
+    String jsonString =
+        "{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
+    aclStatusBuilder.owner("testuser");
+    aclStatusBuilder.group("supergroup");
+    aclStatusBuilder.stickyBit(false);
+
+    List<AclEntry> aclSpec =
+        Lists.newArrayList(aclEntry(ACCESS, USER,"user1", ALL),
+            aclEntry(ACCESS, GROUP, READ_WRITE));
+
+    aclStatusBuilder.addEntries(aclSpec);
+    Assert.assertEquals(jsonString,
+        JsonUtil.toJsonString(aclStatusBuilder.build()));
+
+  }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Wed Feb 26 23:01:58 2014
@@ -21,12 +21,14 @@ import static org.junit.Assert.assertNot
 import static org.junit.Assert.assertNull;
 
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.StringUtils;
@@ -300,4 +302,48 @@ public class TestParam {
 
     UserParam.setUserPatternDomain(oldDomain);
   }
+
+  @Test
+  public void testAclPermissionParam() {
+    final AclPermissionParam p =
+        new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
+    List<AclEntry> setAclList =
+        AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
+            true);
+    Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
+        .toString());
+
+    new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
+    try {
+      new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    new AclPermissionParam(
+        "user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
+
+    try {
+      new AclPermissionParam("user:r-,group:rwx,other:rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    try {
+      new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    try {
+      new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+  }
+ 
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java Wed Feb 26 23:01:58 2014
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.security;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -24,6 +28,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -66,6 +71,7 @@ public class TestPermissionSymlinks {
   @BeforeClass
   public static void beforeClassSetUp() throws Exception {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
@@ -101,8 +107,43 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testDelete() throws Exception {
-    // Try to delete where the symlink's parent dir is not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doDeleteLinkParentNotWritable();
+
+    fs.setPermission(linkParent, new FsPermission((short) 0777));
+    fs.setPermission(targetParent, new FsPermission((short) 0555));
+    fs.setPermission(target, new FsPermission((short) 0555));
+    doDeleteTargetParentAndTargetNotWritable();
+  }
+
+  @Test
+  public void testAclDelete() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doDeleteLinkParentNotWritable();
+
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doDeleteTargetParentAndTargetNotWritable();
+  }
+
+  private void doDeleteLinkParentNotWritable() throws Exception {
+    // Try to delete where the symlink's parent dir is not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -116,11 +157,11 @@ public class TestPermissionSymlinks {
     } catch (AccessControlException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
+  }
+
+  private void doDeleteTargetParentAndTargetNotWritable() throws Exception {
     // Try a delete where the symlink parent dir is writable,
     // but the target's parent and target are not
-    fs.setPermission(linkParent, new FsPermission((short) 0777));
-    fs.setPermission(targetParent, new FsPermission((short) 0555));
-    fs.setPermission(target, new FsPermission((short) 0555));
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -139,6 +180,20 @@ public class TestPermissionSymlinks {
   @Test(timeout = 5000)
   public void testReadWhenTargetNotReadable() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0000));
+    doReadTargetNotReadable();
+  }
+
+  @Test
+  public void testAclReadTargetNotReadable() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, user.getUserName(), NONE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ)));
+    doReadTargetNotReadable();
+  }
+
+  private void doReadTargetNotReadable() throws Exception {
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -157,8 +212,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testFileStatus() throws Exception {
-    // Try to getFileLinkStatus the link when the target is not readable
     fs.setPermission(target, new FsPermission((short) 0000));
+    doGetFileLinkStatusTargetNotReadable();
+  }
+
+  @Test
+  public void testAclGetFileLinkStatusTargetNotReadable() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, user.getUserName(), NONE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ)));
+    doGetFileLinkStatusTargetNotReadable();
+  }
+
+  private void doGetFileLinkStatusTargetNotReadable() throws Exception {
+    // Try to getFileLinkStatus the link when the target is not readable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -176,9 +245,28 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameLinkTargetNotWritableFC() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
+    doRenameLinkTargetNotWritableFC();
+  }
+
+  @Test
+  public void testAclRenameTargetNotWritableFC() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameLinkTargetNotWritableFC();
+  }
+
+  private void doRenameLinkTargetNotWritableFC() throws Exception {
+    // Rename the link when the target and parent are not writable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -197,8 +285,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameSrcNotWritableFC() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doRenameSrcNotWritableFC();
+  }
+
+  @Test
+  public void testAclRenameSrcNotWritableFC() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameSrcNotWritableFC();
+  }
+
+  private void doRenameSrcNotWritableFC() throws Exception {
+    // Rename the link when the target and parent are not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -220,9 +322,28 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameLinkTargetNotWritableFS() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
+    doRenameLinkTargetNotWritableFS();
+  }
+
+  @Test
+  public void testAclRenameTargetNotWritableFS() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameLinkTargetNotWritableFS();
+  }
+
+  private void doRenameLinkTargetNotWritableFS() throws Exception {
+    // Rename the link when the target and parent are not writable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -241,8 +362,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameSrcNotWritableFS() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doRenameSrcNotWritableFS();
+  }
+
+  @Test
+  public void testAclRenameSrcNotWritableFS() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameSrcNotWritableFS();
+  }
+
+  private void doRenameSrcNotWritableFS() throws Exception {
+    // Rename the link when the target and parent are not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -258,6 +393,4 @@ public class TestPermissionSymlinks {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
   }
-
-
 }

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1572325&r1=1572324&r2=1572325&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Wed Feb 26 23:01:58 2014
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-52</EDITS_VERSION>
+  <EDITS_VERSION>-53</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1390942564729</EXPIRY_DATE>
-        <KEY>f270a35a4ebb9984</KEY>
+        <EXPIRY_DATE>1390519460949</EXPIRY_DATE>
+        <KEY>dc8d30edc97df67d</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1390942564735</EXPIRY_DATE>
-        <KEY>22391ec22bc0fc20</KEY>
+        <EXPIRY_DATE>1390519460952</EXPIRY_DATE>
+        <KEY>096bc20b6debed03</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,18 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251365583</MTIME>
-      <ATIME>1390251365583</ATIME>
+      <MTIME>1389828264873</MTIME>
+      <ATIME>1389828264873</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>7</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>9</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -59,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251365626</MTIME>
-      <ATIME>1390251365583</ATIME>
+      <MTIME>1389828265699</MTIME>
+      <ATIME>1389828264873</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -78,9 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1390251365645</TIMESTAMP>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <TIMESTAMP>1389828265705</TIMESTAMP>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -89,9 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1390251365666</TIMESTAMP>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>10</RPC_CALLID>
+      <TIMESTAMP>1389828265712</TIMESTAMP>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>12</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -101,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1390251365693</TIMESTAMP>
+      <TIMESTAMP>1389828265722</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -136,8 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>15</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,8 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>16</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,8 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>17</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -169,18 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251365804</MTIME>
-      <ATIME>1390251365804</ATIME>
+      <MTIME>1389828265757</MTIME>
+      <ATIME>1389828265757</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>18</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>20</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -191,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251365815</MTIME>
-      <ATIME>1390251365804</ATIME>
+      <MTIME>1389828265759</MTIME>
+      <ATIME>1389828265757</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -253,10 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1390251365931</TIMESTAMP>
+      <TIMESTAMP>1389828265782</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>25</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>27</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -267,18 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251365952</MTIME>
-      <ATIME>1390251365952</ATIME>
+      <MTIME>1389828265787</MTIME>
+      <ATIME>1389828265787</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>27</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>29</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -383,8 +383,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366514</MTIME>
-      <ATIME>1390251365952</ATIME>
+      <MTIME>1389828266540</MTIME>
+      <ATIME>1389828265787</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -404,7 +404,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -418,18 +418,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366533</MTIME>
-      <ATIME>1390251366533</ATIME>
+      <MTIME>1389828266544</MTIME>
+      <ATIME>1389828266544</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>40</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -534,8 +534,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366726</MTIME>
-      <ATIME>1390251366533</ATIME>
+      <MTIME>1389828266569</MTIME>
+      <ATIME>1389828266544</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -555,7 +555,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -569,18 +569,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366746</MTIME>
-      <ATIME>1390251366746</ATIME>
+      <MTIME>1389828266572</MTIME>
+      <ATIME>1389828266572</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>52</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -685,8 +685,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366795</MTIME>
-      <ATIME>1390251366746</ATIME>
+      <MTIME>1389828266599</MTIME>
+      <ATIME>1389828266572</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -706,7 +706,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -718,13 +718,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1390251366802</TIMESTAMP>
+      <TIMESTAMP>1389828266603</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -735,15 +735,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1390251366811</MTIME>
-      <ATIME>1390251366811</ATIME>
+      <MTIME>1389828266633</MTIME>
+      <ATIME>1389828266633</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>64</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>66</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -754,18 +754,18 @@
       <INODEID>16393</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251366819</MTIME>
-      <ATIME>1390251366819</ATIME>
+      <MTIME>1389828266637</MTIME>
+      <ATIME>1389828266637</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_382541401_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_16108824_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>65</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>67</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -821,23 +821,7 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_382541401_1</LEASEHOLDER>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
-    <DATA>
-      <TXID>65</TXID>
-      <GENSTAMPV2>1012</GENSTAMPV2>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_REASSIGN_LEASE</OPCODE>
-    <DATA>
-      <TXID>66</TXID>
-      <LEASEHOLDER>HDFS_NameNode</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_16108824_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -845,23 +829,23 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>67</TXID>
+      <TXID>65</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1390251371402</MTIME>
-      <ATIME>1390251366819</ATIME>
+      <MTIME>1389828269751</MTIME>
+      <ATIME>1389828266637</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
+        <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>andrew</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -870,72 +854,79 @@
   <RECORD>
     <OPCODE>OP_ADD_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>68</TXID>
+      <TXID>66</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <OWNERNAME>andrew</OWNERNAME>
-      <GROUPNAME>andrew</GROUPNAME>
+      <OWNERNAME>jing</OWNERNAME>
+      <GROUPNAME>staff</GROUPNAME>
       <MODE>493</MODE>
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>73</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>74</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>69</TXID>
+      <TXID>67</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>74</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>75</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>70</TXID>
+      <TXID>68</TXID>
       <ID>1</ID>
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844399465065912</EXPIRATION>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>75</RPC_CALLID>
+      <EXPIRATION>2305844399041964876</EXPIRATION>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>71</TXID>
+      <TXID>69</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>76</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>77</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>72</TXID>
+      <TXID>70</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>77</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>78</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>73</TXID>
+      <TXID>71</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>ff209a09-9745-4242-837f-21c6b95a1b70</RPC_CLIENTID>
-      <RPC_CALLID>78</RPC_CALLID>
+      <RPC_CLIENTID>b5928e80-e373-4807-a688-f94483d08ce5</RPC_CLIENTID>
+      <RPC_CALLID>79</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>74</TXID>
+      <TXID>72</TXID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_SET_ACL</OPCODE>
+    <DATA>
+      <TXID>73</TXID>
+      <SRC>/file_set_acl</SRC>
     </DATA>
   </RECORD>
 </EDITS>
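
A note on the editsStored fixtures: these files are regenerated rather than
hand-edited, so the churn in timestamps, RPC client and call IDs, and
usernames (andrew -> jing) is an artifact of rerunning the generator in a
different environment. The substantive changes are the EDITS_VERSION bump
from -52 to -53, which accompanies the new ACL edit-log support in this
commit, and the new trailing OP_SET_ACL record (TXID 73) against
/file_set_acl. The XML form can be reproduced from the binary fixture with
the offline edits viewer, e.g. hdfs oev -i editsStored -o editsStored.xml.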


