hadoop-hdfs-commits mailing list archives

From cnaur...@apache.org
Subject svn commit: r1572308 [3/4] - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ sr...
Date Wed, 26 Feb 2014 22:32:30 GMT
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Wed Feb 26 22:32:27 2014
@@ -752,6 +752,148 @@ Content-Length: 0
    {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setTimes
 
 
+** {Modify ACL Entries}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MODIFYACLENTRIES
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.modifyAclEntries
+
+
+** {Remove ACL Entries}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACLENTRIES
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAclEntries
+
+
+** {Remove Default ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEDEFAULTACL"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeDefaultAcl
+
+
+** {Remove ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEACL"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeAcl
+
+
+** {Set ACL}
+
+  * Submit an HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETACL
+                              &aclspec=<ACLSPEC>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setAcl
+
+
+** {Get ACL Status}
+
+  * Submit an HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
++---------------------------------
+
+  The client receives a response with a {{{ACL Status JSON Schema}<<<AclStatus>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "AclStatus": {
+        "entries": [
+            "user:carla:rw-", 
+            "group::r-x"
+        ], 
+        "group": "supergroup", 
+        "owner": "hadoop", 
+        "stickyBit": false
+    }
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
+
+
 * {Delegation Token Operations}
 
 ** {Get Delegation Token}
@@ -980,6 +1122,52 @@ Transfer-Encoding: chunked
   However, if additional properties are included in the responses, they are
   considered as optional properties in order to maintain compatibility.
 
+** {ACL Status JSON Schema}
+
++---------------------------------
+{
+  "name"      : "AclStatus",
+  "properties":
+  {
+    "AclStatus":
+    {
+      "type"      : "object",
+      "properties":
+      {
+        "entries":
+        {
+          "type": "array"
+          "items":
+          {
+            "description": "ACL entry.",
+            "type": "string"
+          }
+        },
+        "group":
+        {
+          "description": "The group owner.",
+          "type"       : "string",
+          "required"   : true
+        },
+        "owner":
+        {
+          "description": "The user who is the owner.",
+          "type"       : "string",
+          "required"   : true
+        },
+        "stickyBit":
+        {
+          "description": "True if the sticky bit is on.",
+          "type"       : "boolean",
+          "required"   : true
+        }
+      }
+    }
+  }
+}
++---------------------------------
+
+
 ** {Boolean JSON Schema}
 
 +---------------------------------
@@ -1387,6 +1575,23 @@ var tokenProperties =
 
 * {HTTP Query Parameter Dictionary}
 
+** {ACL Spec}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<aclspec>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The ACL spec included in ACL modification operations. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | See {{{./HdfsPermissionsGuide.html}Permissions and HDFS}}. |
+*----------------+-------------------------------------------------------------------+
+
+
 ** {Access Time}
 
 *----------------+-------------------------------------------------------------------+
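
The new documentation above ties each WebHDFS ACL endpoint to a FileSystem method. As a rough illustration only (the host, port, and path below are placeholders, not part of this commit), the same operations can be driven through the Java API, with AclEntry.parseAclSpec converting an aclspec string such as user:carla:rw- into the entry list those methods accept:

    import java.net.URI;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclStatus;

    public class WebHdfsAclExample {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; webhdfs://<HOST>:<PORT> maps onto the REST
        // URLs documented above once dfs.namenode.acls.enabled is true.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:50070"),
            new Configuration());
        Path path = new Path("/tmp/acl-demo");

        // op=MODIFYACLENTRIES&aclspec=user:carla:rw-
        List<AclEntry> spec = AclEntry.parseAclSpec("user:carla:rw-", true);
        fs.modifyAclEntries(path, spec);

        // op=GETACLSTATUS returns the owner, group, sticky bit and entries.
        AclStatus status = fs.getAclStatus(path);
        System.out.println(status.getOwner() + ": " + status.getEntries());

        // op=REMOVEACL strips all extended entries, keeping base permissions.
        fs.removeAcl(path);
      }
    }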

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1547224-1569863

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Feb 26 22:32:27 2014
@@ -17,15 +17,21 @@
  */
 package org.apache.hadoop.fs.permission;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestStickyBit {
@@ -43,56 +53,89 @@ public class TestStickyBit {
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
   static UserGroupInformation user2 = 
     UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
-  
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static FileSystem hdfs;
+  private static FileSystem hdfsAsUser1;
+  private static FileSystem hdfsAsUser2;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    initCluster(true);
+  }
+
+  private static void initCluster(boolean format) throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
+      .build();
+    hdfs = cluster.getFileSystem();
+    assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
+    assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
+    hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
+    assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    if (hdfs != null) {
+      for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
+        hdfs.delete(stat.getPath(), true);
+      }
+    }
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Ensure that even if a file is in a directory with the sticky bit on,
    * another user can write to that file (assuming correct permissions).
    */
-  private void confirmCanAppend(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    // Create a tmp directory with wide-open permissions and sticky bit
-    Path p = new Path(baseDir, "tmp");
-
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmCanAppend(Configuration conf, Path p) throws Exception {
     // Write a file to the new tmp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
-    hdfs.setPermission(file, new FsPermission((short) 0777));
+    writeFile(hdfsAsUser1, file);
+    hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
 
     // Log onto cluster as another user and attempt to append to file
-    hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
     Path file2 = new Path(p, "foo");
-    FSDataOutputStream h = hdfs.append(file2);
-    h.write("Some more data".getBytes());
-    h.close();
+    FSDataOutputStream h = null;
+    try {
+      h = hdfsAsUser2.append(file2);
+      h.write("Some more data".getBytes());
+      h.close();
+      h = null;
+    } finally {
+      IOUtils.cleanup(null, h);
+    }
   }
 
   /**
    * Test that one user can't delete another user's file when the sticky bit is
    * set.
    */
-  private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    Path p = new Path(baseDir, "contemporary");
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmDeletingFiles(Configuration conf, Path p)
+      throws Exception {
     // Write a file to the new temp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
+    writeFile(hdfsAsUser1, file);
 
     // Make sure the correct user is the owner
-    assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner());
+    assertEquals(user1.getShortUserName(),
+      hdfsAsUser1.getFileStatus(file).getOwner());
 
     // Log onto cluster as another user and attempt to delete the file
-    FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
-
     try {
-      hdfs2.delete(file, false);
+      hdfsAsUser2.delete(file, false);
       fail("Shouldn't be able to delete someone else's file with SB on");
     } catch (IOException ioe) {
       assertTrue(ioe instanceof AccessControlException);
@@ -105,13 +148,8 @@ public class TestStickyBit {
    * on, the new directory does not automatically get a sticky bit, as is
    * standard Unix behavior
    */
-  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
+  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
       throws IOException {
-    Path p = new Path(baseDir, "scissorsisters");
-
-    // Turn on its sticky bit
-    hdfs.mkdirs(p, new FsPermission((short) 01666));
-
     // Create a subdirectory within it
     Path p2 = new Path(p, "bar");
     hdfs.mkdirs(p2);
@@ -123,23 +161,19 @@ public class TestStickyBit {
   /**
    * Test basic ability to get and set sticky bits on files and directories.
    */
-  private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
+  private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
       throws IOException {
-    Path p1 = new Path(baseDir, "roguetraders");
-
-    hdfs.mkdirs(p1);
-
     // Initially sticky bit should not be set
-    assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Same permission, but with sticky bit on
     short withSB;
-    withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);
+    withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
 
     assertTrue((new FsPermission(withSB)).getStickyBit());
 
-    hdfs.setPermission(p1, new FsPermission(withSB));
-    assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    hdfs.setPermission(p, new FsPermission(withSB));
+    assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Write a file to the fs, try to set its sticky bit
     Path f = new Path(baseDir, "somefile");
@@ -154,37 +188,78 @@ public class TestStickyBit {
   }
 
   @Test
-  public void testGeneralSBBehavior() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+  public void testGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
+
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+
+    confirmCanAppend(conf, p);
 
-      FileSystem hdfs = cluster.getFileSystem();
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
 
-      assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfs.mkdirs(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
 
-      Path baseDir = new Path("/mcgann");
-      hdfs.mkdirs(baseDir);
-      confirmCanAppend(conf, hdfs, baseDir);
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    confirmDeletingFiles(conf, p);
 
-      baseDir = new Path("/eccleston");
-      hdfs.mkdirs(baseDir);
-      confirmSettingAndGetting(hdfs, baseDir);
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
 
-      baseDir = new Path("/tennant");
-      hdfs.mkdirs(baseDir);
-      confirmDeletingFiles(conf, hdfs, baseDir);
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  }
 
-      baseDir = new Path("/smith");
-      hdfs.mkdirs(baseDir);
-      confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  @Test
+  public void testAclGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmCanAppend(conf, p);
+
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
+
+    hdfs.mkdirs(p);
+    applyAcl(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
+
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmDeletingFiles(conf, p);
+
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
+
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    applyAcl(p);
+    confirmStickyBitDoesntPropagate(hdfs, p);
   }
 
   /**
@@ -192,46 +267,42 @@ public class TestStickyBit {
    * bit is set.
    */
   @Test
-  public void testMovingFiles() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
+  public void testMovingFiles() throws Exception {
+    testMovingFiles(false);
+  }
+
+  @Test
+  public void testAclMovingFiles() throws Exception {
+    testMovingFiles(true);
+  }
+
+  private void testMovingFiles(boolean useAcl) throws Exception {
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path tmpPath = new Path("/tmp");
+    Path tmpPath2 = new Path("/tmp2");
+    hdfs.mkdirs(tmpPath);
+    hdfs.mkdirs(tmpPath2);
+    hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath);
+    }
+    hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath2);
+    }
+
+    // Write a file to the new tmp directory as a regular user
+    Path file = new Path(tmpPath, "foo");
 
+    writeFile(hdfsAsUser1, file);
+
+    // Log onto cluster as another user and attempt to move the file
     try {
-      // Set up cluster for testing
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // Create a tmp directory with wide-open permissions and sticky bit
-      Path tmpPath = new Path("/tmp");
-      Path tmpPath2 = new Path("/tmp2");
-      hdfs.mkdirs(tmpPath);
-      hdfs.mkdirs(tmpPath2);
-      hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
-      hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
-
-      // Write a file to the new tmp directory as a regular user
-      Path file = new Path(tmpPath, "foo");
-
-      FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
-
-      writeFile(hdfs2, file);
-
-      // Log onto cluster as another user and attempt to move the file
-      FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
-
-      try {
-        hdfs3.rename(file, new Path(tmpPath2, "renamed"));
-        fail("Shouldn't be able to rename someone else's file with SB on");
-      } catch (IOException ioe) {
-        assertTrue(ioe instanceof AccessControlException);
-        assertTrue(ioe.getMessage().contains("sticky bit"));
-      }
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
+      hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
+      fail("Shouldn't be able to rename someone else's file with SB on");
+    } catch (IOException ioe) {
+      assertTrue(ioe instanceof AccessControlException);
+      assertTrue(ioe.getMessage().contains("sticky bit"));
     }
   }
 
@@ -241,56 +312,91 @@ public class TestStickyBit {
    * re-start.
    */
   @Test
-  public void testStickyBitPersistence() throws IOException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // A tale of three directories...
-      Path sbSet = new Path("/Housemartins");
-      Path sbNotSpecified = new Path("/INXS");
-      Path sbSetOff = new Path("/Easyworld");
-
-      for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
-        hdfs.mkdirs(p);
-
-      // Two directories had there sticky bits set explicitly...
-      hdfs.setPermission(sbSet, new FsPermission((short) 01777));
-      hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+  public void testStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
 
-      cluster.shutdown();
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
 
-      // Start file system up again
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
-      hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs.exists(sbSet));
-      assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
-
-      assertTrue(hdfs.exists(sbNotSpecified));
-      assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
-          .getStickyBit());
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
 
-      assertTrue(hdfs.exists(sbSetOff));
-      assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+    shutdown();
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+  }
+
+  @Test
+  public void testAclStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
+
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
+
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    applyAcl(sbSet);
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+    applyAcl(sbSetOff);
+
+    shutdown();
+
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
   }
 
   /***
    * Write a quick file to the specified file system at specified path
    */
   static private void writeFile(FileSystem hdfs, Path p) throws IOException {
-    FSDataOutputStream o = hdfs.create(p);
-    o.write("some file contents".getBytes());
-    o.close();
+    FSDataOutputStream o = null;
+    try {
+      o = hdfs.create(p);
+      o.write("some file contents".getBytes());
+      o.close();
+      o = null;
+    } finally {
+      IOUtils.cleanup(null, o);
+    }
+  }
+
+  /**
+   * Applies an ACL (both access and default) to the given path.
+   *
+   * @param p Path to set
+   * @throws IOException if an ACL could not be modified
+   */
+  private static void applyAcl(Path p) throws IOException {
+    hdfs.modifyAclEntries(p, Arrays.asList(
+      aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
+      aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
   }
 }
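
The refactored test now shares a single MiniDFSCluster across cases and repeatedly applies mode 01777 to its scratch directories. A minimal sketch of that sticky-bit setup outside the test harness (the names here are illustrative):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitSketch {
      // 01777 is rwxrwxrwx plus the sticky bit: anyone may create files in
      // the directory, but only a file's owner (or the superuser) may
      // delete or rename them.
      static void makeSharedTmp(FileSystem fs, Path dir) throws Exception {
        fs.mkdirs(dir);
        fs.setPermission(dir, new FsPermission((short) 01777));
        if (!fs.getFileStatus(dir).getPermission().getStickyBit()) {
          throw new IllegalStateException("sticky bit was not applied");
        }
      }
    }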

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Feb 26 22:32:27 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -1095,6 +1097,8 @@ public class DFSTestUtil {
     filesystem.removeCacheDirective(id);
     // OP_REMOVE_CACHE_POOL
     filesystem.removeCachePool("pool1");
+    // OP_SET_ACL
+    filesystem.setAcl(pathConcatTarget, Lists.<AclEntry> newArrayList());
   }
 
   public static void abortStream(DFSOutputStream out) throws IOException {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Wed Feb 26 22:32:27 2014
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -65,6 +66,7 @@ public class TestSafeMode {
   public void startUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();      
     fs = cluster.getFileSystem();
@@ -328,12 +330,48 @@ public class TestSafeMode {
         fs.setTimes(file1, 0, 0);
       }});
 
+    runFsFun("modifyAclEntries while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
+      }});
+
+    runFsFun("removeAclEntries while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
+      }});
+
+    runFsFun("removeDefaultAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeDefaultAcl(file1);
+      }});
+
+    runFsFun("removeAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeAcl(file1);
+      }});
+
+    runFsFun("setAcl while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.setAcl(file1, Lists.<AclEntry>newArrayList());
+      }});
+
     try {
       DFSTestUtil.readFile(fs, file1);
     } catch (IOException ioe) {
       fail("Set times failed while in SM");
     }
 
+    try {
+      fs.getAclStatus(file1);
+    } catch (IOException ioe) {
+      fail("getAclStatus failed while in SM");
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }
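
The new assertions pin down the safe-mode contract for ACLs: every mutating call (modifyAclEntries, removeAclEntries, removeDefaultAcl, removeAcl, setAcl) is rejected, while getAclStatus, a read, still succeeds. A hedged sketch of that contract against a DistributedFileSystem (method and variable names are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class SafeModeAclSketch {
      static void checkContract(DistributedFileSystem dfs, Path file)
          throws IOException {
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
          dfs.removeAcl(file);        // mutation: must be rejected
          throw new AssertionError("removeAcl should fail in safe mode");
        } catch (IOException expected) {
          // SafeModeException surfaces as an IOException
        }
        dfs.getAclStatus(file);       // read-only: allowed in safe mode
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      }
    }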

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Feb 26 22:32:27 2014
@@ -26,6 +26,11 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -68,6 +73,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
+import org.junit.Assert;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -580,4 +586,39 @@ public class TestPBHelper {
     assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
         HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
   }
+
+  @Test
+  public void testAclEntryProto() {
+    // All fields populated.
+    AclEntry e1 = new AclEntry.Builder().setName("test")
+        .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
+        .setType(AclEntryType.OTHER).build();
+    // No name.
+    AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
+    // No permission, which will default to the 0'th enum element.
+    AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER).setName("test").build();
+    AclEntry[] expected = new AclEntry[] { e1, e2,
+        new AclEntry.Builder()
+            .setScope(e3.getScope())
+            .setType(e3.getType())
+            .setName(e3.getName())
+            .setPermission(FsAction.NONE)
+            .build() };
+    AclEntry[] actual = Lists.newArrayList(
+        PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists
+            .newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
+    Assert.assertArrayEquals(expected, actual);
+  }
+
+  @Test
+  public void testAclStatusProto() {
+    AclEntry e = new AclEntry.Builder().setName("test")
+        .setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
+        .setType(AclEntryType.OTHER).build();
+    AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
+        .build();
+    Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
+  }
 }

Copied: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java (from r1569870, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java?p2=hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java&r1=1569870&r2=1572308&rev=1572308&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java Wed Feb 26 22:32:27 2014
@@ -27,6 +27,9 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Helper methods useful for writing ACL tests.
@@ -101,6 +104,43 @@ public final class AclTestHelpers {
   }
 
   /**
+   * Asserts that permission is denied to the given fs/user for the given file.
+   *
+   * @param fs FileSystem to check
+   * @param user UserGroupInformation owner of fs
+   * @param pathToCheck Path file to check
+   * @throws Exception if there is an unexpected error
+   */
+  public static void assertFilePermissionDenied(FileSystem fs,
+      UserGroupInformation user, Path pathToCheck) throws Exception {
+    try {
+      DFSTestUtil.readFileBuffer(fs, pathToCheck);
+      fail("expected AccessControlException for user " + user + ", path = " +
+        pathToCheck);
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  /**
+   * Asserts that permission is granted to the given fs/user for the given file.
+   *
+   * @param fs FileSystem to check
+   * @param user UserGroupInformation owner of fs
+   * @param pathToCheck Path file to check
+   * @throws Exception if there is an unexpected error
+   */
+  public static void assertFilePermissionGranted(FileSystem fs,
+      UserGroupInformation user, Path pathToCheck) throws Exception {
+    try {
+      DFSTestUtil.readFileBuffer(fs, pathToCheck);
+    } catch (AccessControlException e) {
+      fail("expected permission granted for user " + user + ", path = " +
+        pathToCheck);
+    }
+  }
+
+  /**
    * Asserts the value of the FsPermission bits on the inode of a specific path.
    *
    * @param fs FileSystem to use for check
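
The aclEntry(...) overloads used throughout these tests are convenience wrappers over AclEntry.Builder. Roughly, the helper call aclEntry(ACCESS, USER, "diana", NONE) expands to:

    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;

    public class AclEntrySketch {
      public static void main(String[] args) {
        AclEntry entry = new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("diana")
            .setPermission(FsAction.NONE)
            .build();
        System.out.println(entry);  // aclspec form, e.g. "user:diana:---"
      }
    }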

Copied: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java (from r1569870, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java?p2=hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java&r1=1569870&r2=1572308&rev=1572308&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java Wed Feb 26 22:32:27 2014
@@ -27,19 +27,28 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 /**
@@ -47,24 +56,42 @@ import com.google.common.collect.Lists;
  * also covers interaction of setPermission with inodes that have ACLs.
  */
 public abstract class FSAclBaseTest {
+  private static final UserGroupInformation BRUCE =
+    UserGroupInformation.createUserForTesting("bruce", new String[] { });
+  private static final UserGroupInformation DIANA =
+    UserGroupInformation.createUserForTesting("diana", new String[] { });
+  private static final UserGroupInformation SUPERGROUP_MEMBER =
+    UserGroupInformation.createUserForTesting("super", new String[] {
+      DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT });
 
   protected static MiniDFSCluster cluster;
-  protected static FileSystem fs;
+  protected static Configuration conf;
   private static int pathCount = 0;
   private static Path path;
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember;
+
   @AfterClass
-  public static void shutdown() throws Exception {
-    IOUtils.cleanup(null, fs);
+  public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
   @Before
-  public void setUp() {
+  public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
+    initFileSystems();
+  }
+
+  @After
+  public void destroyFileSystems() {
+    IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember);
+    fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = null;
   }
 
   @Test
@@ -1036,6 +1063,188 @@ public abstract class FSAclBaseTest {
     assertAclFeature(dirPath, true);
   }
 
+  @Test
+  public void testSkipAclEnforcementPermsDisabled() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", NONE)));
+    assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
+    try {
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+      destroyFileSystems();
+      restartCluster();
+      initFileSystems();
+      assertFilePermissionGranted(fsAsDiana, DIANA, bruceFile);
+    } finally {
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+      restartCluster();
+    }
+  }
+
+  @Test
+  public void testSkipAclEnforcementSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", NONE)));
+    assertFilePermissionGranted(fs, DIANA, bruceFile);
+    assertFilePermissionGranted(fsAsBruce, DIANA, bruceFile);
+    assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
+    assertFilePermissionGranted(fsAsSupergroupMember, SUPERGROUP_MEMBER,
+      bruceFile);
+  }
+
+  @Test
+  public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana", ALL));
+    fsAsBruce.modifyAclEntries(bruceFile, aclSpec);
+    fs.modifyAclEntries(bruceFile, aclSpec);
+    fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.modifyAclEntries(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testRemoveAclEntriesMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, "diana"));
+    fsAsBruce.removeAclEntries(bruceFile, aclSpec);
+    fs.removeAclEntries(bruceFile, aclSpec);
+    fsAsSupergroupMember.removeAclEntries(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeAclEntries(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testRemoveDefaultAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.removeDefaultAcl(bruceFile);
+    fs.removeDefaultAcl(bruceFile);
+    fsAsSupergroupMember.removeDefaultAcl(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeDefaultAcl(bruceFile);
+  }
+
+  @Test
+  public void testRemoveAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.removeAcl(bruceFile);
+    fs.removeAcl(bruceFile);
+    fsAsSupergroupMember.removeAcl(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.removeAcl(bruceFile);
+  }
+
+  @Test
+  public void testSetAclMustBeOwnerOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, "diana", READ_WRITE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ));
+    fsAsBruce.setAcl(bruceFile, aclSpec);
+    fs.setAcl(bruceFile, aclSpec);
+    fsAsSupergroupMember.setAcl(bruceFile, aclSpec);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.setAcl(bruceFile, aclSpec);
+  }
+
+  @Test
+  public void testGetAclStatusRequiresTraverseOrSuper() throws Exception {
+    Path bruceDir = new Path(path, "bruce");
+    Path bruceFile = new Path(bruceDir, "file");
+    fs.mkdirs(bruceDir);
+    fs.setOwner(bruceDir, "bruce", null);
+    fsAsBruce.create(bruceFile).close();
+    fsAsBruce.setAcl(bruceDir, Lists.newArrayList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, "diana", READ),
+      aclEntry(ACCESS, GROUP, NONE),
+      aclEntry(ACCESS, OTHER, NONE)));
+    fsAsBruce.getAclStatus(bruceFile);
+    fs.getAclStatus(bruceFile);
+    fsAsSupergroupMember.getAclStatus(bruceFile);
+    exception.expect(AccessControlException.class);
+    fsAsDiana.getAclStatus(bruceFile);
+  }
+
+  /**
+   * Creates a FileSystem for the super-user.
+   *
+   * @return FileSystem for super-user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem() throws Exception {
+    return cluster.getFileSystem();
+  }
+
+  /**
+   * Creates a FileSystem for a specific user.
+   *
+   * @param user UserGroupInformation specific user
+   * @return FileSystem for specific user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem(UserGroupInformation user)
+      throws Exception {
+    return DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
+  }
+
+  /**
+   * Initializes all FileSystem instances used in the tests.
+   *
+   * @throws Exception if initialization fails
+   */
+  private void initFileSystems() throws Exception {
+    fs = createFileSystem();
+    fsAsBruce = createFileSystem(BRUCE);
+    fsAsDiana = createFileSystem(DIANA);
+    fsAsSupergroupMember = createFileSystem(SUPERGROUP_MEMBER);
+  }
+
+  /**
+   * Restarts the cluster without formatting, so all data is preserved.
+   *
+   * @throws Exception if restart fails
+   */
+  private void restartCluster() throws Exception {
+    shutdown();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false)
+      .build();
+    cluster.waitActive();
+  }
+
   /**
    * Asserts whether or not the inode for the test path has an AclFeature.
    *
@@ -1064,6 +1273,12 @@ public abstract class FSAclBaseTest {
     AclFeature aclFeature = inode.getAclFeature();
     if (expectAclFeature) {
       assertNotNull(aclFeature);
+      // Intentionally capturing a reference to the entries, not using nested
+      // calls.  This way, we get compile-time enforcement that the entries are
+      // stored in an ImmutableList.
+      ImmutableList<AclEntry> entries = aclFeature.getEntries();
+      assertNotNull(entries);
+      assertFalse(entries.isEmpty());
     } else {
       assertNull(aclFeature);
     }
@@ -1075,7 +1290,7 @@ public abstract class FSAclBaseTest {
    * @param perm short expected permission bits
    * @throws IOException thrown if there is an I/O error
    */
-  private static void assertPermission(short perm) throws IOException {
+  private void assertPermission(short perm) throws IOException {
     assertPermission(path, perm);
   }
 
@@ -1086,7 +1301,7 @@ public abstract class FSAclBaseTest {
    * @param perm short expected permission bits
    * @throws IOException thrown if there is an I/O error
    */
-  private static void assertPermission(Path pathToCheck, short perm)
+  private void assertPermission(Path pathToCheck, short perm)
       throws IOException {
     AclTestHelpers.assertPermission(fs, pathToCheck, perm);
   }
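
All of the new *MustBeOwnerOrSuper tests follow one JUnit idiom: every call before exception.expect(...) must succeed, and the single call after it must throw. A stripped-down sketch of the pattern, with fsAsBruce, fsAsDiana, and bruceFile standing in for the fixtures above:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.AccessControlException;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExpectedException;

    public class AclEnforcementSketch {
      @Rule
      public ExpectedException exception = ExpectedException.none();

      private FileSystem fsAsBruce, fsAsDiana;  // wired up as in FSAclBaseTest
      private Path bruceFile;                   // a file owned by bruce

      @Test
      public void onlyOwnerOrSuperMayChangeAcls() throws Exception {
        fsAsBruce.removeAcl(bruceFile);             // owner: succeeds
        exception.expect(AccessControlException.class);
        fsAsDiana.removeAcl(bruceFile);             // non-owner: must throw
      }
    }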

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Wed Feb 26 22:32:27 2014
@@ -96,6 +96,7 @@ public class OfflineEditsViewerHelper {
       "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster =
       new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
     cluster.waitClusterUp();

Copied: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java (from r1569870, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java?p2=hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java&r1=1569870&r2=1572308&rev=1572308&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java Wed Feb 26 22:32:27 2014
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.BeforeClass;
 
@@ -33,11 +30,9 @@ public class TestNameNodeAcl extends FSA
 
   @BeforeClass
   public static void init() throws Exception {
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
-    fs = cluster.getFileSystem();
-    assertTrue(fs instanceof DistributedFileSystem);
   }
 }
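
Several files in this commit flip the same switch: ACLs are disabled by default, so any cluster (or MiniDFSCluster) exercising them must opt in. A one-method sketch of the opt-in, equivalent to setting dfs.namenode.acls.enabled=true in hdfs-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class EnableAclsSketch {
      public static Configuration aclEnabledConf() {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        return conf;
      }
    }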

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Feb 26 22:32:27 2014
@@ -91,6 +91,7 @@ public class TestNamenodeRetryCache {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     namesystem = cluster.getNamesystem();

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Feb 26 22:32:27 2014
@@ -125,6 +125,7 @@ public class TestRetryCacheWithHA {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();

Copied: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java (from r1569870, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java?p2=hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java&r1=1569870&r2=1572308&rev=1572308&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java Wed Feb 26 22:32:27 2014
@@ -696,43 +696,6 @@ public class TestAclWithSnapshot {
   }
 
   /**
-   * Asserts that permission is denied to the given fs/user for the given file.
-   *
-   * @param fs FileSystem to check
-   * @param user UserGroupInformation owner of fs
-   * @param pathToCheck Path file to check
-   * @throws Exception if there is an unexpected error
-   */
-  private static void assertFilePermissionDenied(FileSystem fs,
-      UserGroupInformation user, Path pathToCheck) throws Exception {
-    try {
-      fs.open(pathToCheck).close();
-      fail("expected AccessControlException for user " + user + ", path = " +
-        pathToCheck);
-    } catch (AccessControlException e) {
-      // expected
-    }
-  }
-
-  /**
-   * Asserts that permission is granted to the given fs/user for the given file.
-   *
-   * @param fs FileSystem to check
-   * @param user UserGroupInformation owner of fs
-   * @param pathToCheck Path file to check
-   * @throws Exception if there is an unexpected error
-   */
-  private static void assertFilePermissionGranted(FileSystem fs,
-      UserGroupInformation user, Path pathToCheck) throws Exception {
-    try {
-      fs.open(pathToCheck).close();
-    } catch (AccessControlException e) {
-      fail("expected permission granted for user " + user + ", path = " +
-        pathToCheck);
-    }
-  }
-
-  /**
    * Asserts the value of the FsPermission bits on the inode of the test path.
    *
    * @param perm short expected permission bits

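The two helpers deleted above encode the usual open-and-expect-AccessControlException idiom; since other tests in this commit statically import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers, they were presumably consolidated there rather than dropped. A hedged usage sketch, assuming the shared class keeps the same names and signatures (the user and paths are illustrative, and the statements would live inside a test method):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.security.UserGroupInformation;
    import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;

    UserGroupInformation bruce = UserGroupInformation.createUserForTesting(
        "bruce", new String[] { "sales" });
    FileSystem fsAsBruce = DFSTestUtil.getFileSystemAs(bruce, conf);
    // open() must throw AccessControlException here ...
    assertFilePermissionDenied(fsAsBruce, bruce, new Path("/secret/file"));
    // ... and must succeed here.
    assertFilePermissionGranted(fsAsBruce, bruce, new Path("/public/file"));
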
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java Wed Feb 26 22:32:27 2014
@@ -305,7 +305,8 @@ public class TestDiff {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
-    final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
+    final INodeDirectory newinode = new INodeDirectory(oldinode, false,
+      oldinode.getFeatures());
     newinode.setModificationTime(oldinode.getModificationTime() + 1);
 
     current.set(i, newinode);

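The TestDiff hunk tracks a signature change rather than new behavior: the INodeDirectory copy constructor apparently no longer takes a bare boolean for feature copying but an explicit feature list, so the caller forwards oldinode.getFeatures() (an array is accepted by a varargs parameter). A hedged sketch of the presumed new shape; the real declaration lives in INodeDirectory.java and may differ in detail:

    public INodeDirectory(INodeDirectory other, boolean adoptChildren,
        Feature... featuresToCopy) {
      // copy the basic inode state from "other", optionally adopt its
      // children, then attach only the listed features (ACL, quota,
      // snapshot, ...) instead of an all-or-nothing boolean copy.
    }
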
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Wed Feb 26 22:32:27 2014
@@ -17,11 +17,19 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
+
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -32,6 +40,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Lists;
+
 public class TestJsonUtil {
   static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
     return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
@@ -135,6 +145,47 @@ public class TestJsonUtil {
     response.put("ipAddr", "127.0.0.1");
     checkDecodeFailure(response);
   }
+  
+  @Test
+  public void testToAclStatus() {
+    String jsonString =
+        "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
+    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+
+    List<AclEntry> aclSpec =
+        Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
+            aclEntry(ACCESS, USER, "user1", READ_WRITE),
+            aclEntry(ACCESS, GROUP, READ_WRITE),
+            aclEntry(ACCESS, OTHER, READ_EXECUTE));
+
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
+    aclStatusBuilder.owner("testuser");
+    aclStatusBuilder.group("supergroup");
+    aclStatusBuilder.addEntries(aclSpec);
+    aclStatusBuilder.stickyBit(false);
+
+    Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
+        JsonUtil.toAclStatus(json));
+  }
+
+  @Test
+  public void testToJsonFromAclStatus() {
+    String jsonString =
+        "{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
+    aclStatusBuilder.owner("testuser");
+    aclStatusBuilder.group("supergroup");
+    aclStatusBuilder.stickyBit(false);
+
+    List<AclEntry> aclSpec =
+        Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL),
+            aclEntry(ACCESS, GROUP, READ_WRITE));
+
+    aclStatusBuilder.addEntries(aclSpec);
+    Assert.assertEquals(jsonString,
+        JsonUtil.toJsonString(aclStatusBuilder.build()));
+
+  }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {

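Taken together, the two new tests pin the WebHDFS JSON wire format for AclStatus in both directions: parse-then-compare and build-then-serialize. A hedged round-trip sketch assembled from the same pieces (same static imports as the test; the owner and group names are reused for clarity):

    AclStatus.Builder builder = new AclStatus.Builder();
    builder.owner("testuser");
    builder.group("supergroup");
    builder.stickyBit(false);
    builder.addEntries(Lists.newArrayList(
        aclEntry(ACCESS, USER, "user1", ALL),
        aclEntry(ACCESS, GROUP, READ_WRITE)));
    AclStatus status = builder.build();

    // Serialize, parse back, and compare; the round trip should be lossless.
    String json = JsonUtil.toJsonString(status);
    AclStatus parsed = JsonUtil.toAclStatus((Map<?, ?>) JSON.parse(json));
    Assert.assertEquals(status, parsed);
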
Copied: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java (from r1569870, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java?p2=hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java&r1=1569870&r2=1572308&rev=1572308&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java Wed Feb 26 22:32:27 2014
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -34,12 +33,10 @@ public class TestWebHDFSAcl extends FSAc
 
   @BeforeClass
   public static void init() throws Exception {
-    Configuration conf = WebHdfsTestUtil.createConf();
+    conf = WebHdfsTestUtil.createConf();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
-    assertTrue(fs instanceof WebHdfsFileSystem);
   }
 
   /**
@@ -51,4 +48,29 @@ public class TestWebHDFSAcl extends FSAc
   @Ignore
   public void testDefaultAclNewSymlinkIntermediate() {
   }
+
+  /**
+   * Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
+   *
+   * @return WebHdfsFileSystem for super-user
+   * @throws Exception if creation fails
+   */
+  @Override
+  protected WebHdfsFileSystem createFileSystem() throws Exception {
+    return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
+  }
+
+  /**
+   * Overridden to provide a WebHdfsFileSystem wrapper for a specific user.
+   *
+   * @param user UserGroupInformation specific user
+   * @return WebHdfsFileSystem for specific user
+   * @throws Exception if creation fails
+   */
+  @Override
+  protected WebHdfsFileSystem createFileSystem(UserGroupInformation user)
+      throws Exception {
+    return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf,
+      WebHdfsFileSystem.SCHEME);
+  }
 }

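The rewrite above turns the subclass into a pure factory: init() no longer assigns a static fs, and the two createFileSystem() overrides hand back WebHdfsFileSystem instances (covariant return types). This only works if the base class exposes a protected static conf and invokes the hooks itself; a hedged sketch of that presumed contract, where everything beyond the names visible in this diff is a guess:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.security.UserGroupInformation;

    public abstract class FSAclBaseTest {
      protected static Configuration conf;
      protected static MiniDFSCluster cluster;

      // Default flavor under test; subclasses such as TestWebHDFSAcl
      // override both hooks to swap in another FileSystem implementation.
      protected FileSystem createFileSystem() throws Exception {
        return cluster.getFileSystem();
      }

      protected FileSystem createFileSystem(UserGroupInformation user)
          throws Exception {
        return DFSTestUtil.getFileSystemAs(user, conf);
      }
    }
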
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Wed Feb 26 22:32:27 2014
@@ -21,12 +21,14 @@ import static org.junit.Assert.assertNot
 import static org.junit.Assert.assertNull;
 
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.StringUtils;
@@ -300,4 +302,48 @@ public class TestParam {
 
     UserParam.setUserPatternDomain(oldDomain);
   }
+
+  @Test
+  public void testAclPermissionParam() {
+    final AclPermissionParam p =
+        new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
+    List<AclEntry> setAclList =
+        AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
+            true);
+    Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
+        .toString());
+
+    new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
+    try {
+      new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    new AclPermissionParam(
+        "user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
+
+    try {
+      new AclPermissionParam("user:r-,group:rwx,other:rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    try {
+      new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+
+    try {
+      new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
+      Assert.fail();
+    } catch (IllegalArgumentException e) {
+      LOG.info("EXPECTED: " + e);
+    }
+  }
+ 
 }

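The positive and negative cases above trace out the aclspec grammar that AclPermissionParam enforces: comma-separated entries of the form [default:](user|group|mask|other):[name]:(r|-)(w|-)(x|-), with a known entry type and exactly three permission characters. A hedged sketch of parsing a spec directly (the spec string itself is illustrative):

    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;

    // The boolean asks the parser to require a permission field in every
    // entry, which is what AclPermissionParam needs.
    List<AclEntry> entries = AclEntry.parseAclSpec(
        "user::rwx,user:bruce:rw-,group::r--,mask::rw-,other::---", true);
    for (AclEntry e : entries) {
      System.out.println(e);  // prints aclspec form, e.g. "user:bruce:rw-"
    }
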
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java Wed Feb 26 22:32:27 2014
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.security;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -24,6 +28,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -66,6 +71,7 @@ public class TestPermissionSymlinks {
   @BeforeClass
   public static void beforeClassSetUp() throws Exception {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
@@ -101,8 +107,43 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testDelete() throws Exception {
-    // Try to delete where the symlink's parent dir is not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doDeleteLinkParentNotWritable();
+
+    fs.setPermission(linkParent, new FsPermission((short) 0777));
+    fs.setPermission(targetParent, new FsPermission((short) 0555));
+    fs.setPermission(target, new FsPermission((short) 0555));
+    doDeleteTargetParentAndTargetNotWritable();
+  }
+
+  @Test
+  public void testAclDelete() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doDeleteLinkParentNotWritable();
+
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doDeleteTargetParentAndTargetNotWritable();
+  }
+
+  private void doDeleteLinkParentNotWritable() throws Exception {
+    // Try to delete where the symlink's parent dir is not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -116,11 +157,11 @@ public class TestPermissionSymlinks {
     } catch (AccessControlException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
+  }
+
+  private void doDeleteTargetParentAndTargetNotWritable() throws Exception {
     // Try a delete where the symlink parent dir is writable,
     // but the target's parent and target are not
-    fs.setPermission(linkParent, new FsPermission((short) 0777));
-    fs.setPermission(targetParent, new FsPermission((short) 0555));
-    fs.setPermission(target, new FsPermission((short) 0555));
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -139,6 +180,20 @@ public class TestPermissionSymlinks {
   @Test(timeout = 5000)
   public void testReadWhenTargetNotReadable() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0000));
+    doReadTargetNotReadable();
+  }
+
+  @Test
+  public void testAclReadTargetNotReadable() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, user.getUserName(), NONE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ)));
+    doReadTargetNotReadable();
+  }
+
+  private void doReadTargetNotReadable() throws Exception {
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -157,8 +212,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testFileStatus() throws Exception {
-    // Try to getFileLinkStatus the link when the target is not readable
     fs.setPermission(target, new FsPermission((short) 0000));
+    doGetFileLinkStatusTargetNotReadable();
+  }
+
+  @Test
+  public void testAclGetFileLinkStatusTargetNotReadable() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, READ_WRITE),
+      aclEntry(ACCESS, USER, user.getUserName(), NONE),
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, OTHER, READ)));
+    doGetFileLinkStatusTargetNotReadable();
+  }
+
+  private void doGetFileLinkStatusTargetNotReadable() throws Exception {
+    // Try to getFileLinkStatus the link when the target is not readable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -176,9 +245,28 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameLinkTargetNotWritableFC() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
+    doRenameLinkTargetNotWritableFC();
+  }
+
+  @Test
+  public void testAclRenameTargetNotWritableFC() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameLinkTargetNotWritableFC();
+  }
+
+  private void doRenameLinkTargetNotWritableFC() throws Exception {
+    // Rename the link when the target and parent are not writable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -197,8 +285,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameSrcNotWritableFC() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doRenameSrcNotWritableFC();
+  }
+
+  @Test
+  public void testAclRenameSrcNotWritableFC() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameSrcNotWritableFC();
+  }
+
+  private void doRenameSrcNotWritableFC() throws Exception {
+    // Rename the link when the target and parent are not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -220,9 +322,28 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameLinkTargetNotWritableFS() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
+    doRenameLinkTargetNotWritableFS();
+  }
+
+  @Test
+  public void testAclRenameTargetNotWritableFS() throws Exception {
+    fs.setAcl(target, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    fs.setAcl(targetParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameLinkTargetNotWritableFS();
+  }
+
+  private void doRenameLinkTargetNotWritableFS() throws Exception {
+    // Rename the link when the target and parent are not writable
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
@@ -241,8 +362,22 @@ public class TestPermissionSymlinks {
 
   @Test(timeout = 5000)
   public void testRenameSrcNotWritableFS() throws Exception {
-    // Rename the link when the target and parent are not writable
     fs.setPermission(linkParent, new FsPermission((short) 0555));
+    doRenameSrcNotWritableFS();
+  }
+
+  @Test
+  public void testAclRenameSrcNotWritableFS() throws Exception {
+    fs.setAcl(linkParent, Arrays.asList(
+      aclEntry(ACCESS, USER, ALL),
+      aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, ALL),
+      aclEntry(ACCESS, OTHER, ALL)));
+    doRenameSrcNotWritableFS();
+  }
+
+  private void doRenameSrcNotWritableFS() throws Exception {
+    // Rename the link when the target and parent are not writable
     try {
       user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
@@ -258,6 +393,4 @@ public class TestPermissionSymlinks {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
   }
-
-
 }

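One pattern recurs through all of the Acl* variants above: the base user/group/other entries stay at ALL while a named entry pins the test user to reduced rights, so exactly one principal exercises the denial path that the shared do*NotWritable helpers assert on. A hedged distillation (the path and user name are illustrative):

    // Everyone keeps rwx through the base entries; the named entry limits
    // one user to r-x, so only that user's writes raise
    // AccessControlException.
    fs.setAcl(path, Arrays.asList(
        aclEntry(ACCESS, USER, ALL),                       // owner
        aclEntry(ACCESS, USER, "testuser", READ_EXECUTE),  // user under test
        aclEntry(ACCESS, GROUP, ALL),
        aclEntry(ACCESS, OTHER, ALL)));
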
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1572308&r1=1572307&r2=1572308&view=diff
==============================================================================
Binary files - no diff available.


