hadoop-hdfs-commits mailing list archives

From whe...@apache.org
Subject svn commit: r1560131 - in /hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs: CHANGES-HDFS-4685.txt src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
Date Tue, 21 Jan 2014 18:33:02 GMT
Author: wheat9
Date: Tue Jan 21 18:33:02 2014
New Revision: 1560131

URL: http://svn.apache.org/r1560131
Log:
HDFS-5615. NameNode: implement handling of ACLs in combination with sticky bit. Contributed
by Chris Nauroth.
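
For context, a minimal client-side sketch (not part of this commit) of the combination the
new tests exercise: a world-writable directory with the sticky bit set (mode 01777) that also
carries named-user ACL entries. The path, user name, and class name below are illustrative
only; the calls use the FileSystem ACL API added on the HDFS-4685 branch and mirror what the
test's applyAcl() helper does via AclTestHelpers.

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitAclSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // World-writable directory with the sticky bit set (01777).
        Path dir = new Path("/tmp/shared");   // illustrative path
        fs.mkdirs(dir);
        fs.setPermission(dir, new FsPermission((short) 01777));

        // Grant a named user full access (both access and default entries).
        fs.modifyAclEntries(dir, Arrays.asList(
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.USER).setName("rose")
                .setPermission(FsAction.ALL).build(),
            new AclEntry.Builder().setScope(AclEntryScope.DEFAULT)
                .setType(AclEntryType.USER).setName("rose")
                .setPermission(FsAction.ALL).build()));

        // HDFS-5615: even with the ALL entry above, the sticky bit on the
        // parent directory still prevents "rose" from deleting or renaming
        // files in it that are owned by another user.
      }
    }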

Modified:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt?rev=1560131&r1=1560130&r2=1560131&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
(original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
Tue Jan 21 18:33:02 2014
@@ -39,6 +39,9 @@ HDFS-4685 (Unreleased)
     HDFS-5613. NameNode: implement handling of ACLs in combination with
     symlinks. (Chris Nauroth via wheat9)
 
+    HDFS-5615. NameNode: implement handling of ACLs in combination with sticky
+    bit. (Chris Nauroth via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1560131&r1=1560130&r2=1560131&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
(original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
Tue Jan 21 18:33:02 2014
@@ -17,15 +17,21 @@
  */
 package org.apache.hadoop.fs.permission;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -33,8 +39,12 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestStickyBit {
@@ -43,56 +53,88 @@ public class TestStickyBit {
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
   static UserGroupInformation user2 = 
     UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
-  
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static FileSystem hdfs;
+  private static FileSystem hdfsAsUser1;
+  private static FileSystem hdfsAsUser2;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    initCluster(true);
+  }
+
+  private static void initCluster(boolean format) throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
+      .build();
+    hdfs = cluster.getFileSystem();
+    assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
+    assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
+    hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
+    assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    if (hdfs != null) {
+      for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
+        hdfs.delete(stat.getPath(), true);
+      }
+    }
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Ensure that even if a file is in a directory with the sticky bit on,
    * another user can write to that file (assuming correct permissions).
    */
-  private void confirmCanAppend(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    // Create a tmp directory with wide-open permissions and sticky bit
-    Path p = new Path(baseDir, "tmp");
-
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmCanAppend(Configuration conf, Path p) throws Exception {
     // Write a file to the new tmp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
-    hdfs.setPermission(file, new FsPermission((short) 0777));
+    writeFile(hdfsAsUser1, file);
+    hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
 
     // Log onto cluster as another user and attempt to append to file
-    hdfs = DFSTestUtil.getFileSystemAs(user2, conf);
     Path file2 = new Path(p, "foo");
-    FSDataOutputStream h = hdfs.append(file2);
-    h.write("Some more data".getBytes());
-    h.close();
+    FSDataOutputStream h = null;
+    try {
+      h = hdfsAsUser2.append(file2);
+      h.write("Some more data".getBytes());
+      h.close();
+      h = null;
+    } finally {
+      IOUtils.cleanup(null, h);
+    }
   }
 
   /**
    * Test that one user can't delete another user's file when the sticky bit is
    * set.
    */
-  private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
-      Path baseDir) throws IOException, InterruptedException {
-    Path p = new Path(baseDir, "contemporary");
-    hdfs.mkdirs(p);
-    hdfs.setPermission(p, new FsPermission((short) 01777));
-
+  private void confirmDeletingFiles(Configuration conf, Path p)
+      throws Exception {
     // Write a file to the new temp directory as a regular user
-    hdfs = DFSTestUtil.getFileSystemAs(user1, conf);
     Path file = new Path(p, "foo");
-    writeFile(hdfs, file);
+    writeFile(hdfsAsUser1, file);
 
     // Make sure the correct user is the owner
-    assertEquals(user1.getShortUserName(), hdfs.getFileStatus(file).getOwner());
+    assertEquals(user1.getShortUserName(),
+      hdfsAsUser1.getFileStatus(file).getOwner());
 
     // Log onto cluster as another user and attempt to delete the file
-    FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user2, conf);
-
     try {
-      hdfs2.delete(file, false);
+      hdfsAsUser2.delete(file, false);
       fail("Shouldn't be able to delete someone else's file with SB on");
     } catch (IOException ioe) {
       assertTrue(ioe instanceof AccessControlException);
@@ -105,13 +147,8 @@ public class TestStickyBit {
    * on, the new directory does not automatically get a sticky bit, as is
    * standard Unix behavior
    */
-  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
+  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
       throws IOException {
-    Path p = new Path(baseDir, "scissorsisters");
-
-    // Turn on its sticky bit
-    hdfs.mkdirs(p, new FsPermission((short) 01666));
-
     // Create a subdirectory within it
     Path p2 = new Path(p, "bar");
     hdfs.mkdirs(p2);
@@ -123,23 +160,19 @@ public class TestStickyBit {
   /**
    * Test basic ability to get and set sticky bits on files and directories.
    */
-  private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
+  private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
       throws IOException {
-    Path p1 = new Path(baseDir, "roguetraders");
-
-    hdfs.mkdirs(p1);
-
     // Initially sticky bit should not be set
-    assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Same permission, but with sticky bit on
     short withSB;
-    withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);
+    withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
 
     assertTrue((new FsPermission(withSB)).getStickyBit());
 
-    hdfs.setPermission(p1, new FsPermission(withSB));
-    assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+    hdfs.setPermission(p, new FsPermission(withSB));
+    assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
 
     // Write a file to the fs, try to set its sticky bit
     Path f = new Path(baseDir, "somefile");
@@ -154,37 +187,78 @@ public class TestStickyBit {
   }
 
   @Test
-  public void testGeneralSBBehavior() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+  public void testGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
+
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+
+    confirmCanAppend(conf, p);
 
-      FileSystem hdfs = cluster.getFileSystem();
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
 
-      assertTrue(hdfs instanceof DistributedFileSystem);
+    hdfs.mkdirs(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
 
-      Path baseDir = new Path("/mcgann");
-      hdfs.mkdirs(baseDir);
-      confirmCanAppend(conf, hdfs, baseDir);
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    confirmDeletingFiles(conf, p);
 
-      baseDir = new Path("/eccleston");
-      hdfs.mkdirs(baseDir);
-      confirmSettingAndGetting(hdfs, baseDir);
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
 
-      baseDir = new Path("/tennant");
-      hdfs.mkdirs(baseDir);
-      confirmDeletingFiles(conf, hdfs, baseDir);
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  }
 
-      baseDir = new Path("/smith");
-      hdfs.mkdirs(baseDir);
-      confirmStickyBitDoesntPropagate(hdfs, baseDir);
+  @Test
+  public void testAclGeneralSBBehavior() throws Exception {
+    Path baseDir = new Path("/mcgann");
+    hdfs.mkdirs(baseDir);
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmCanAppend(conf, p);
+
+    baseDir = new Path("/eccleston");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "roguetraders");
+
+    hdfs.mkdirs(p);
+    applyAcl(p);
+    confirmSettingAndGetting(hdfs, p, baseDir);
+
+    baseDir = new Path("/tennant");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+    applyAcl(p);
+    confirmDeletingFiles(conf, p);
+
+    baseDir = new Path("/smith");
+    hdfs.mkdirs(baseDir);
+    p = new Path(baseDir, "scissorsisters");
+
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+    applyAcl(p);
+    confirmStickyBitDoesntPropagate(hdfs, p);
   }
 
   /**
@@ -192,46 +266,42 @@ public class TestStickyBit {
    * bit is set.
    */
   @Test
-  public void testMovingFiles() throws IOException, InterruptedException {
-    MiniDFSCluster cluster = null;
+  public void testMovingFiles() throws Exception {
+    testMovingFiles(false);
+  }
+
+  @Test
+  public void testAclMovingFiles() throws Exception {
+    testMovingFiles(true);
+  }
+
+  private void testMovingFiles(boolean useAcl) throws Exception {
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path tmpPath = new Path("/tmp");
+    Path tmpPath2 = new Path("/tmp2");
+    hdfs.mkdirs(tmpPath);
+    hdfs.mkdirs(tmpPath2);
+    hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath);
+    }
+    hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
+    if (useAcl) {
+      applyAcl(tmpPath2);
+    }
+
+    // Write a file to the new tmp directory as a regular user
+    Path file = new Path(tmpPath, "foo");
 
+    writeFile(hdfsAsUser1, file);
+
+    // Log onto cluster as another user and attempt to move the file
     try {
-      // Set up cluster for testing
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // Create a tmp directory with wide-open permissions and sticky bit
-      Path tmpPath = new Path("/tmp");
-      Path tmpPath2 = new Path("/tmp2");
-      hdfs.mkdirs(tmpPath);
-      hdfs.mkdirs(tmpPath2);
-      hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
-      hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
-
-      // Write a file to the new tmp directory as a regular user
-      Path file = new Path(tmpPath, "foo");
-
-      FileSystem hdfs2 = DFSTestUtil.getFileSystemAs(user1, conf);
-
-      writeFile(hdfs2, file);
-
-      // Log onto cluster as another user and attempt to move the file
-      FileSystem hdfs3 = DFSTestUtil.getFileSystemAs(user2, conf);
-
-      try {
-        hdfs3.rename(file, new Path(tmpPath2, "renamed"));
-        fail("Shouldn't be able to rename someone else's file with SB on");
-      } catch (IOException ioe) {
-        assertTrue(ioe instanceof AccessControlException);
-        assertTrue(ioe.getMessage().contains("sticky bit"));
-      }
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
+      hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
+      fail("Shouldn't be able to rename someone else's file with SB on");
+    } catch (IOException ioe) {
+      assertTrue(ioe instanceof AccessControlException);
+      assertTrue(ioe.getMessage().contains("sticky bit"));
     }
   }
 
@@ -241,56 +311,91 @@ public class TestStickyBit {
    * re-start.
    */
   @Test
-  public void testStickyBitPersistence() throws IOException {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs instanceof DistributedFileSystem);
-
-      // A tale of three directories...
-      Path sbSet = new Path("/Housemartins");
-      Path sbNotSpecified = new Path("/INXS");
-      Path sbSetOff = new Path("/Easyworld");
-
-      for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
-        hdfs.mkdirs(p);
-
-      // Two directories had there sticky bits set explicitly...
-      hdfs.setPermission(sbSet, new FsPermission((short) 01777));
-      hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+  public void testStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
 
-      cluster.shutdown();
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
 
-      // Start file system up again
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
-      hdfs = cluster.getFileSystem();
-
-      assertTrue(hdfs.exists(sbSet));
-      assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
-
-      assertTrue(hdfs.exists(sbNotSpecified));
-      assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
-          .getStickyBit());
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
 
-      assertTrue(hdfs.exists(sbSetOff));
-      assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+    shutdown();
 
-    } finally {
-      if (cluster != null)
-        cluster.shutdown();
-    }
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+  }
+
+  @Test
+  public void testAclStickyBitPersistence() throws Exception {
+    // A tale of three directories...
+    Path sbSet = new Path("/Housemartins");
+    Path sbNotSpecified = new Path("/INXS");
+    Path sbSetOff = new Path("/Easyworld");
+
+    for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+      hdfs.mkdirs(p);
+
+    // Two directories had their sticky bits set explicitly...
+    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+    applyAcl(sbSet);
+    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+    applyAcl(sbSetOff);
+
+    shutdown();
+
+    // Start file system up again
+    initCluster(false);
+
+    assertTrue(hdfs.exists(sbSet));
+    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+    assertTrue(hdfs.exists(sbNotSpecified));
+    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+        .getStickyBit());
+
+    assertTrue(hdfs.exists(sbSetOff));
+    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
   }
 
   /***
    * Write a quick file to the specified file system at specified path
    */
   static private void writeFile(FileSystem hdfs, Path p) throws IOException {
-    FSDataOutputStream o = hdfs.create(p);
-    o.write("some file contents".getBytes());
-    o.close();
+    FSDataOutputStream o = null;
+    try {
+      o = hdfs.create(p);
+      o.write("some file contents".getBytes());
+      o.close();
+      o = null;
+    } finally {
+      IOUtils.cleanup(null, o);
+    }
+  }
+
+  /**
+   * Applies an ACL (both access and default) to the given path.
+   *
+   * @param p Path to set
+   * @throws IOException if an ACL could not be modified
+   */
+  private static void applyAcl(Path p) throws IOException {
+    hdfs.modifyAclEntries(p, Arrays.asList(
+      aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
+      aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
   }
 }


