hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r734590 - in /hadoop/core/trunk: ./ src/core/org/apache/hadoop/fs/ src/core/org/apache/hadoop/fs/permission/ src/docs/src/documentation/content/xdocs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/namenode...
Date Thu, 15 Jan 2009 01:36:21 GMT
Author: szetszwo
Date: Wed Jan 14 17:36:20 2009
New Revision: 734590

URL: http://svn.apache.org/viewvc?rev=734590&view=rev
Log:
HADOOP-3953. Implement sticky bit for directories in HDFS. (Jakob Homan via szetszwo)

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java
    hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShellPermissions.java
    hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
    hadoop/core/trunk/src/core/org/apache/hadoop/fs/permission/FsPermission.java
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java
    hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Jan 14 17:36:20 2009
@@ -22,6 +22,9 @@
     corresponding permission requirement for running the ClientProtocol
     methods will be enforced.  (szetszwo)
 
+    HADOOP-3953. Implement sticky bit for directories in HDFS. (Jakob Homan
+    via szetszwo)
+
   IMPROVEMENTS
 
     HADOOP-4936. Improvements to TestSafeMode. (shv)

Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java Wed Jan 14 17:36:20 2009
@@ -22,8 +22,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.text.SimpleDateFormat;
 import java.util.*;
 import java.util.zip.GZIPInputStream;
@@ -1364,9 +1362,10 @@
       "\t-R\tmodifies the files recursively. This is the only option\n" +
       "\t\tcurrently supported.\n\n" +
       "\tMODE\tMode is same as mode used for chmod shell command.\n" +
-      "\t\tOnly letters recognized are 'rwxX'. E.g. a+r,g-w,+rwx,o=r\n\n" +
-      "\tOCTALMODE Mode specifed in 3 digits. Unlike shell command,\n" +
-      "\t\tthis requires all three digits.\n" +
+      "\t\tOnly letters recognized are 'rwxXt'. E.g. +t,a+r,g-w,+rwx,o=r\n\n" +
+      "\tOCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may\n" +
+      "\tbe 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
+      "\tshell command, it is not possible to specify only part of the mode\n" +
       "\t\tE.g. 754 is same as u=rwx,g=rx,o=r\n\n" +
       "\t\tIf none of 'augo' is specified, 'a' is assumed and unlike\n" +
       "\t\tshell command, no umask is applied.\n";

Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShellPermissions.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShellPermissions.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShellPermissions.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShellPermissions.java Wed Jan 14 17:36:20
2009
@@ -27,31 +27,40 @@
 
 /**
  * This class is the home for file permissions related commands.
- * Moved to this seperate class since FsShell is getting too large.
+ * Moved to this separate class since FsShell is getting too large.
  */
 class FsShellPermissions {
   
   /*========== chmod ==========*/
-   
-  /* The pattern is alsmost as flexible as mode allowed by 
-   * chmod shell command. The main restriction is that we recognize only rwxX.
-   * To reduce errors we also enforce 3 digits for octal mode.
-   */  
-  private static Pattern chmodNormalPattern = 
-             Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxX]+)([,\\s]*)\\s*");
+
+  /*
+   * The pattern is almost as flexible as mode allowed by chmod shell command.
+   * The main restriction is that we recognize only rwxXt. To reduce errors we
+   * also enforce octal mode specifications of either 3 digits without a sticky
+   * bit setting or four digits with a sticky bit setting.
+   */
+  private static Pattern chmodNormalPattern =
+   Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxXt]+)([,\\s]*)\\s*");
   private static Pattern chmodOctalPattern =
-            Pattern.compile("^\\s*[+]?([0-7]{3})\\s*$");
-  
-  static String CHMOD_USAGE = 
+            Pattern.compile("^\\s*[+]?([01]?)([0-7]{3})\\s*$");
+
+  static String CHMOD_USAGE =
                             "-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...";
 
   private static class ChmodHandler extends CmdHandler {
 
-    private short userMode, groupMode, othersMode;
-    private char userType = '+', groupType = '+', othersType='+';
+    private short userMode;
+    private short groupMode;
+    private short othersMode;
+    private short stickyMode;
+    private char userType = '+';
+    private char groupType = '+';
+    private char othersType = '+';
+    private char stickyBitType = '+';
 
     private void applyNormalPattern(String modeStr, Matcher matcher)
                                     throws IOException {
+      // Are there multiple permissions stored in one chmod?
       boolean commaSeperated = false;
 
       for(int i=0; i < 1 || matcher.end() < modeStr.length(); i++) {
@@ -61,15 +70,15 @@
 
         /* groups : 1 : [ugoa]*
          *          2 : [+-=]
-         *          3 : [rwxX]+
+         *          3 : [rwxXt]+
          *          4 : [,\s]*
          */
 
         String str = matcher.group(2);
         char type = str.charAt(str.length() - 1);
 
-        boolean user, group, others;
-        user = group = others = false;
+        boolean user, group, others, stickyBit;
+        user = group = others = stickyBit = false;
 
         for(char c : matcher.group(1).toCharArray()) {
           switch (c) {
@@ -85,13 +94,15 @@
           user = group = others = true;
         }
 
-        short  mode = 0;
+        short mode = 0;
+
         for(char c : matcher.group(3).toCharArray()) {
           switch (c) {
           case 'r' : mode |= 4; break;
           case 'w' : mode |= 2; break;
           case 'x' : mode |= 1; break;
           case 'X' : mode |= 8; break;
+          case 't' : stickyBit = true; break;
           default  : throw new RuntimeException("Unexpected");
           }
         }
@@ -109,6 +120,9 @@
         if ( others ) {
           othersMode = mode;
           othersType = type;
+          
+          stickyMode = (short) (stickyBit ? 1 : 0);
+          stickyBitType = type;
         }
 
         commaSeperated = matcher.group(4).contains(",");
@@ -117,7 +131,15 @@
 
     private void applyOctalPattern(String modeStr, Matcher matcher) {
       userType = groupType = othersType = '=';
-      String str = matcher.group(1);
+
+      // Check if sticky bit is specified
+      String sb = matcher.group(1);
+      if(!sb.isEmpty()) {
+        stickyMode = Short.valueOf(sb.substring(0, 1));
+        stickyBitType = '=';
+      }
+
+      String str = matcher.group(2);
       userMode = Short.valueOf(str.substring(0, 1));
       groupMode = Short.valueOf(str.substring(1, 2));
       othersMode = Short.valueOf(str.substring(2, 3));      
@@ -170,11 +192,13 @@
       FsPermission perms = file.getPermission();
       int existing = perms.toShort();
       boolean exeOk = file.isDir() || (existing & 0111) != 0;
-      int newperms = ( applyChmod(userType, userMode, 
-                                  (existing>>>6)&7, exeOk) << 6 |
-                       applyChmod(groupType, groupMode, 
-                                  (existing>>>3)&7, exeOk) << 3 |
-                       applyChmod(othersType, othersMode, existing&7, exeOk) );
+      int newperms = ( applyChmod(stickyBitType, stickyMode,
+                             (existing>>>9), false) << 9 |
+                       applyChmod(userType, userMode,
+                             (existing>>>6)&7, exeOk) << 6 |
+                       applyChmod(groupType, groupMode,
+                             (existing>>>3)&7, exeOk) << 3 |
+                       applyChmod(othersType, othersMode, existing&7, exeOk));
 
       if (existing != newperms) {
         try {

Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java Wed Jan 14 17:36:20
2009
@@ -471,7 +471,7 @@
   public void setPermission(Path p, FsPermission permission
       ) throws IOException {
     execCommand(pathToFile(p), Shell.SET_PERMISSION_COMMAND,
-        String.format("%04o", permission.toShort()));
+        String.format("%05o", permission.toShort()));
   }
 
   private static String execCommand(File f, String... cmd) throws IOException {

Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/permission/FsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/permission/FsPermission.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/permission/FsPermission.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/permission/FsPermission.java Wed Jan 14
17:36:20 2009
@@ -51,6 +51,7 @@
   private FsAction useraction = null;
   private FsAction groupaction = null;
   private FsAction otheraction = null;
+  private boolean stickyBit = false;
 
   private FsPermission() {}
 
@@ -60,7 +61,13 @@
    * @param g group action
    * @param o other action
    */
-  public FsPermission(FsAction u, FsAction g, FsAction o) {set(u, g, o);}
+  public FsPermission(FsAction u, FsAction g, FsAction o) {
+    this(u, g, o, false);
+  }
+
+  public FsPermission(FsAction u, FsAction g, FsAction o, boolean sb) {
+    set(u, g, o, sb);
+  }
 
   /**
    * Construct by the given mode.
@@ -89,14 +96,17 @@
   /** Return other {@link FsAction}. */
   public FsAction getOtherAction() {return otheraction;}
 
-  private void set(FsAction u, FsAction g, FsAction o) {
+  private void set(FsAction u, FsAction g, FsAction o, boolean sb) {
     useraction = u;
     groupaction = g;
     otheraction = o;
+    stickyBit = sb;
   }
+
   public void fromShort(short n) {
     FsAction[] v = FsAction.values();
-    set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7]);
+
+    set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) );
   }
 
   /** {@inheritDoc} */
@@ -122,8 +132,11 @@
    * Encode the object to a short.
    */
   public short toShort() {
-    int s = (useraction.ordinal() << 6) | (groupaction.ordinal() << 3) |
+    int s =  (stickyBit ? 1 << 9 : 0)     |
+             (useraction.ordinal() << 6)  |
+             (groupaction.ordinal() << 3) |
              otheraction.ordinal();
+
     return (short)s;
   }
 
@@ -133,7 +146,8 @@
       FsPermission that = (FsPermission)obj;
       return this.useraction == that.useraction
           && this.groupaction == that.groupaction
-          && this.otheraction == that.otheraction;
+          && this.otheraction == that.otheraction
+          && this.stickyBit == that.stickyBit;
     }
     return false;
   }
@@ -143,7 +157,15 @@
 
   /** {@inheritDoc} */
   public String toString() {
-    return useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL;
+    String str = useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL;
+    if(stickyBit) {
+      StringBuilder str2 = new StringBuilder(str);
+      str2.replace(str2.length() - 1, str2.length(),
+           otheraction.implies(FsAction.EXECUTE) ? "t" : "T");
+      str = str2.toString();
+    }
+
+    return str;
   }
 
   /** Apply a umask to this permission and return a new one */
@@ -165,6 +187,11 @@
     }
     return new FsPermission((short)umask);
   }
+
+  public boolean getStickyBit() {
+    return stickyBit;
+  }
+
   /** Set the user file creation mask (umask) */
   public static void setUMask(Configuration conf, FsPermission umask) {
     conf.setInt(UMASK_LABEL, umask.toShort());
@@ -172,7 +199,7 @@
 
   /** Get the default permission. */
   public static FsPermission getDefault() {
-    return new FsPermission((short)0777);
+    return new FsPermission((short)00777);
   }
 
   /**
@@ -187,12 +214,19 @@
       throw new IllegalArgumentException("length != 10(unixSymbolicPermission="
           + unixSymbolicPermission + ")");
     }
+
     int n = 0;
     for(int i = 1; i < unixSymbolicPermission.length(); i++) {
       n = n << 1;
       char c = unixSymbolicPermission.charAt(i);
       n += (c == '-' || c == 'T' || c == 'S') ? 0: 1;
     }
+
+    // Add sticky bit value if set
+    if(unixSymbolicPermission.charAt(9) == 't' ||
+        unixSymbolicPermission.charAt(9) == 'T')
+      n += 01000;
+
     return new FsPermission((short)n);
   }
 }

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml Wed Jan 14
17:36:20 2009
@@ -108,7 +108,7 @@
     <section>
       <title> The File System Namespace </title>
       <p>
-      HDFS supports a traditional hierarchical file organization. A user or an application
can create directories and store files inside these directories. The file system namespace
hierarchy is similar to most other existing file systems; one can create and remove files,
move a file from one directory to another, or rename a file. HDFS does not yet implement user
quotas or access permissions. HDFS does not support hard links or soft links. However, the
HDFS architecture does not preclude implementing these features.
+      HDFS supports a traditional hierarchical file organization. A user or an application
can create directories and store files inside these directories. The file system namespace
hierarchy is similar to most other existing file systems; one can create and remove files,
move a file from one directory to another, or rename a file. HDFS does not yet implement user
quotas. HDFS does not support hard links or soft links. However, the HDFS architecture does
not preclude implementing these features.
       </p>
       <p>
       The NameNode maintains the file system namespace. Any change to the file system namespace
or its properties is recorded by the NameNode. An application can specify the number of replicas
of a file that should be maintained by HDFS. The number of copies of a file is called the
replication factor of that file. This information is stored by the NameNode.

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
(original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
Wed Jan 14 17:36:20 2009
@@ -30,7 +30,7 @@
   <body>
     <section> <title>Overview</title>
       <p>
-		The Hadoop Distributed File System (HDFS) implements a permissions model for files and
directories that shares much of the POSIX model. Each file and directory is associated with
an <em>owner</em> and a <em>group</em>. The file or directory has
separate permissions for the user that is the owner, for other users that are members of the
group, and for all other users. For files, the <em>r</em> permission is required
to read the file, and the <em>w</em> permission is required to write or append
to the file. For directories, the <em>r</em> permission is required to list the
contents of the directory, the <em>w</em> permission is required to create or
delete files or directories, and the <em>x</em> permission is required to access
a child of the directory. In contrast to the POSIX model, there are no <em>sticky</em>,
<em>setuid</em> or <em>setgid</em> bits for files as there is no notion
of executable files. For directories, there no <em>sticky</em>, <em>setuid</em>
or <em>setgid</em> bits directory as a simplification. Collectively, the permissions of a file
or directory are its <em>mode</em>. In general, Unix customs for representing
and displaying modes will be used, including the use of octal numbers in this description.
When a file or directory is created, its owner is the user identity of the client process,
and its group is the group of the parent directory (the BSD rule).
+		The Hadoop Distributed File System (HDFS) implements a permissions model for files and
directories that shares much of the POSIX model. Each file and directory is associated with
an <em>owner</em> and a <em>group</em>. The file or directory has
separate permissions for the user that is the owner, for other users that are members of the
group, and for all other users. For files, the <em>r</em> permission is required
to read the file, and the <em>w</em> permission is required to write or append
to the file. For directories, the <em>r</em> permission is required to list the
contents of the directory, the <em>w</em> permission is required to create or
delete files or directories, and the <em>x</em> permission is required to access
a child of the directory. In contrast to the POSIX model, there are no <em>setuid</em>
or <em>setgid</em> bits for files as there is no notion of executable files. For
directories, there are no <em>setuid</em> or <em>setgid</em> bits
directory as a simplification. The <em>Sticky bit</em> can be set on directories, preventing
anyone except the superuser, directory owner or file owner from deleting or moving the files
within the directory. Setting the sticky bit for a file has no effect. Collectively, the permissions
of a file or directory are its <em>mode</em>. In general, Unix customs for representing
and displaying modes will be used, including the use of octal numbers in this description.
When a file or directory is created, its owner is the user identity of the client process,
and its group is the group of the parent directory (the BSD rule).
 	</p>
 	<p>
 		Each client process that accesses HDFS has a two-part identity composed of the <em>user
name</em>, and <em>groups list</em>. Whenever HDFS must do a permissions
check for a file or directory <code>foo</code> accessed by a client process,
@@ -173,7 +173,7 @@
 	<dd>
 		The name of the group of super-users.
 	</dd>
-	<dt><code>dfs.upgrade.permission = 777</code></dt>
+	<dt><code>dfs.upgrade.permission = 0777</code></dt>
 	<dd>
 		The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em>
set for files. For configuration files, the decimal value <em>511<sub>10</sub></em>
may be used.
 	</dd>

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Jan
14 17:36:20 2009
@@ -41,9 +41,9 @@
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 41: saveNamespace introduced.
+   * 42: updated to use sticky bit
    */
-  public static final long versionID = 41L;
+  public static final long versionID = 42L;
   
   ///////////////////////////////////////
   // File contents

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java Wed Jan 14
17:36:20 2009
@@ -80,7 +80,7 @@
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -18;
+  public static final int LAYOUT_VERSION = -19;
   // Current version: 
-  // Support disk space quotas
+  // -19: Sticky bit
 }

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed
Jan 14 17:36:20 2009
@@ -384,7 +384,7 @@
     this.isPermissionEnabled = conf.getBoolean("dfs.permissions", true);
     LOG.info("supergroup=" + supergroup);
     LOG.info("isPermissionEnabled=" + isPermissionEnabled);
-    short filePermission = (short)conf.getInt("dfs.upgrade.permission", 0777);
+    short filePermission = (short)conf.getInt("dfs.upgrade.permission", 00777);
     this.defaultPermission = PermissionStatus.createImmutable(
         fsOwner.getUserName(), supergroup, new FsPermission(filePermission));
 

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java
(original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/PermissionChecker.java
Wed Jan 14 17:36:20 2009
@@ -93,7 +93,7 @@
           + ", access=" + access
           + ", subAccess=" + subAccess);
     }
-
+    // check if (parentAccess != null) && file exists, then check sb
     synchronized(root) {
       INode[] inodes = root.getExistingPathINodes(path);
       int ancestorIndex = inodes.length - 2;
@@ -101,6 +101,10 @@
           ancestorIndex--);
       checkTraverse(inodes, ancestorIndex);
 
+      if(parentAccess != null && parentAccess.implies(FsAction.WRITE)
+          && inodes[inodes.length - 1] != null)
+        checkStickyBit(inodes[inodes.length - 2], inodes[inodes.length - 1]);
+
       if (ancestorAccess != null && inodes.length > 1) {
         check(inodes, ancestorIndex, ancestorAccess);
       }
@@ -176,4 +180,23 @@
     throw new AccessControlException("Permission denied: user=" + user
         + ", access=" + access + ", inode=" + inode);
   }
+
+  private void checkStickyBit(INode parent, INode inode) throws AccessControlException {
+    if(!parent.getFsPermission().getStickyBit()) {
+      return;
+    }
+
+    // If this user is the directory owner, return
+    if(parent.getUserName().equals(user)) {
+      return;
+    }
+
+    // if this user is the file owner, return
+    if(inode.getUserName().equals(user)) {
+      return;
+    }
+
+    throw new AccessControlException("Permission denied by sticky bit setting:" +
+      " user=" + user + ", inode=" + inode);
+  }
 }

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestFsPermission.java Wed Jan
14 17:36:20 2009
@@ -45,26 +45,72 @@
     assertEquals(WRITE_EXECUTE, ALL.and(WRITE_EXECUTE));
   }
 
-  public void testFsPermission() {
-    for(short s = 0; s < (1<<9); s++) {
+  /**
+   * Ensure that when manually specifying permission modes we get
+   * the expected values back out for all combinations
+   */
+  public void testConvertingPermissions() {
+    for(short s = 0; s < 01777; s++) {
       assertEquals(s, new FsPermission(s).toShort());
     }
 
-    String symbolic = "-rwxrwxrwx";
-    StringBuilder b = new StringBuilder("-123456789");
-    for(int i = 0; i < (1<<9); i++) {
-      for(int j = 1; j < 10; j++) {
-        b.setCharAt(j, '-');
+    short s = 0;
+
+    for(boolean sb : new boolean [] { false, true }) {
+      for(FsAction u : FsAction.values()) {
+        for(FsAction g : FsAction.values()) {
+          for(FsAction o : FsAction.values()) {
+            FsPermission f = new FsPermission(u, g, o, sb);
+            assertEquals(s, f.toShort());
+            s++;
+          }
+        }
       }
-      String binary = Integer.toBinaryString(i);
-      int len = binary.length();
-      for(int j = 0; j < len; j++) {
-        if (binary.charAt(j) == '1') {
-          int k = 9 - (len - 1 - j);
-          b.setCharAt(k, symbolic.charAt(k));
+    }
+  }
+
+  public void testStickyBitToString() {
+    // Check that every permission has its sticky bit represented correctly
+    for(boolean sb : new boolean [] { false, true }) {
+      for(FsAction u : FsAction.values()) {
+        for(FsAction g : FsAction.values()) {
+          for(FsAction o : FsAction.values()) {
+            FsPermission f = new FsPermission(u, g, o, sb);
+            if(f.getStickyBit() && f.getOtherAction().implies(EXECUTE))
+              assertEquals('t', f.toString().charAt(8));
+            else if(f.getStickyBit() && !f.getOtherAction().implies(EXECUTE))
+              assertEquals('T', f.toString().charAt(8));
+            else if(!f.getStickyBit()  && f.getOtherAction().implies(EXECUTE))
+              assertEquals('x', f.toString().charAt(8));
+            else
+              assertEquals('-', f.toString().charAt(8));
+          }
         }
       }
-      assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
     }
   }
+
+  public void testFsPermission() {
+
+      String symbolic = "-rwxrwxrwx";
+      StringBuilder b = new StringBuilder("-123456789");
+
+      for(int i = 0; i < (1<<9); i++) {
+        for(int j = 1; j < 10; j++) {
+          b.setCharAt(j, '-');
+        }
+        String binary = Integer.toBinaryString(i);
+
+        int len = binary.length();
+        for(int j = 0; j < len; j++) {
+          if (binary.charAt(j) == '1') {
+            int k = 9 - (len - 1 - j);
+            b.setCharAt(k, symbolic.charAt(k));
+          }
+        }
+
+        assertEquals(i, FsPermission.valueOf(b.toString()).toShort());
+      }
+    }
+
 }

Added: hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=734590&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Jan
14 17:36:20 2009
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.permission;
+
+import java.io.IOException;
+
+import javax.security.auth.login.LoginException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
/**
 * Tests of sticky-bit semantics on HDFS directories: that a non-owner can
 * still append to a writable file inside a sticky directory, that a
 * non-owner cannot delete or rename another user's file there, that the
 * bit does not propagate to subdirectories, that it survives a cluster
 * restart, and basic get/set behavior on files and directories.
 */
public class TestStickyBit extends TestCase {

  // Two distinct non-superuser identities, used to exercise the
  // owner-only restrictions the sticky bit imposes.
  static UnixUserGroupInformation user1 = new UnixUserGroupInformation(
      "theDoctor", new String[] { "tardis" });
  static UnixUserGroupInformation user2 = new UnixUserGroupInformation("rose",
      new String[] { "powellestates" });

  /**
   * Ensure that even if a file is in a directory with the sticky bit on,
   * another user can write to that file (assuming correct permissions).
   */
  private void confirmCanAppend(Configuration conf, FileSystem hdfs,
      Path baseDir) throws IOException {
    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");

    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));

    // Write a file to the new tmp directory as a regular user
    hdfs = logonAs(user1, conf, hdfs);
    Path file = new Path(p, "foo");
    writeFile(hdfs, file);
    // World-writable so user2's append is blocked (if at all) only by
    // the directory's sticky bit, not by file permissions.
    hdfs.setPermission(file, new FsPermission((short) 0777));

    // Log onto cluster as another user and attempt to append to file;
    // this must succeed: the sticky bit restricts delete/rename, not write.
    hdfs = logonAs(user2, conf, hdfs);
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = hdfs.append(file2);
    h.write("Some more data".getBytes());
    h.close();
  }

  /**
   * Test that one user can't delete another user's file when the sticky bit is
   * set.
   */
  private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
      Path baseDir) throws IOException {
    Path p = new Path(baseDir, "contemporary");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));

    // Write a file to the new temp directory as a regular user
    hdfs = logonAs(user1, conf, hdfs);
    Path file = new Path(p, "foo");
    writeFile(hdfs, file);

    // Make sure the correct user is the owner
    assertEquals(user1.getUserName(), hdfs.getFileStatus(file).getOwner());

    // Log onto cluster as another user and attempt to delete the file
    FileSystem hdfs2 = logonAs(user2, conf, hdfs);

    try {
      hdfs2.delete(file, false);
      fail("Shouldn't be able to delete someone else's file with SB on");
    } catch (IOException ioe) {
      // AccessControlException extends IOException; verify both the exact
      // type and that the message identifies the sticky bit as the cause.
      assertTrue(ioe instanceof AccessControlException);
      assertTrue(ioe.getMessage().contains("sticky bit"));
    }
  }

  /**
   * Test that if a directory is created in a directory that has the sticky bit
   * on, the new directory does not automatically get a sticky bit, as is
   * standard Unix behavior
   */
  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
      throws IOException {
    Path p = new Path(baseDir, "scissorsisters");

    // Turn on its sticky bit
    hdfs.mkdirs(p, new FsPermission((short) 01666));

    // Create a subdirectory within it
    Path p2 = new Path(p, "bar");
    hdfs.mkdirs(p2);

    // Ensure new directory doesn't have its sticky bit on
    assertFalse(hdfs.getFileStatus(p2).getPermission().getStickyBit());
  }

  /**
   * Test basic ability to get and set sticky bits on files and directories.
   */
  private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
      throws IOException {
    Path p1 = new Path(baseDir, "roguetraders");

    hdfs.mkdirs(p1);

    // Initially sticky bit should not be set
    assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());

    // Same permission, but with sticky bit on (01000 is the octal mask
    // for the sticky bit)
    short withSB;
    withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);

    assertTrue((new FsPermission(withSB)).getStickyBit());

    hdfs.setPermission(p1, new FsPermission(withSB));
    assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());

    // However, while you can set the sticky bit on files, it has no effect,
    // following the linux/unix model:
    //
    // [user@host test]$ ls -alh
    // -rw-r--r-- 1 user users 0 Dec 31 01:46 aFile
    // [user@host test]$ chmod +t aFile
    // [user@host test]$ ls -alh
    // -rw-r--r-- 1 user users 0 Dec 31 01:46 aFile

    // Write a file to the fs, try to set its sticky bit, expect to be ignored
    Path f = new Path(baseDir, "somefile");
    writeFile(hdfs, f);
    assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());

    withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);

    hdfs.setPermission(f, new FsPermission(withSB));

    assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
  }

  /**
   * Drive the four sticky-bit scenarios above against a live
   * mini-cluster with permission checking enabled.
   */
  public void testGeneralSBBehavior() throws IOException {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      // Permission checking is off by default; the sticky bit is only
      // enforced when it is on.
      conf.setBoolean("dfs.permissions", true);
      cluster = new MiniDFSCluster(conf, 4, true, null);

      FileSystem hdfs = cluster.getFileSystem();

      assertTrue(hdfs instanceof DistributedFileSystem);

      // Each scenario gets its own base directory to stay independent.
      Path baseDir = new Path("/mcgann");
      hdfs.mkdirs(baseDir);
      confirmCanAppend(conf, hdfs, baseDir);

      baseDir = new Path("/eccleston");
      hdfs.mkdirs(baseDir);
      confirmSettingAndGetting(hdfs, baseDir);

      baseDir = new Path("/tennant");
      hdfs.mkdirs(baseDir);
      confirmDeletingFiles(conf, hdfs, baseDir);

      baseDir = new Path("/smith");
      hdfs.mkdirs(baseDir);
      confirmStickyBitDoesntPropagate(hdfs, baseDir);

    } finally {
      if (cluster != null)
        cluster.shutdown();
    }
  }

  /**
   * Test that one user can't rename/move another user's file when the sticky
   * bit is set.
   */
  public void testMovingFiles() throws IOException, LoginException {
    MiniDFSCluster cluster = null;

    try {
      // Set up cluster for testing
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.permissions", true);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      FileSystem hdfs = cluster.getFileSystem();

      assertTrue(hdfs instanceof DistributedFileSystem);

      // Create a tmp directory with wide-open permissions and sticky bit
      Path tmpPath = new Path("/tmp");
      Path tmpPath2 = new Path("/tmp2");
      hdfs.mkdirs(tmpPath);
      hdfs.mkdirs(tmpPath2);
      hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
      hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));

      // Write a file to the new tmp directory as a regular user
      Path file = new Path(tmpPath, "foo");

      FileSystem hdfs2 = logonAs(user1, conf, hdfs);

      writeFile(hdfs2, file);

      // Log onto cluster as another user and attempt to move the file
      FileSystem hdfs3 = logonAs(user2, conf, hdfs);

      try {
        hdfs3.rename(file, new Path(tmpPath2, "renamed"));
        fail("Shouldn't be able to rename someone else's file with SB on");
      } catch (IOException ioe) {
        assertTrue(ioe instanceof AccessControlException);
        assertTrue(ioe.getMessage().contains("sticky bit"));
      }
    } finally {
      if (cluster != null)
        cluster.shutdown();
    }
  }

  /**
   * Ensure that when we set a sticky bit and shut down the file system, we get
   * the sticky bit back on re-start, and that no extra sticky bits appear after
   * re-start.
   */
  public void testStickyBitPersistence() throws IOException {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.permissions", true);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      FileSystem hdfs = cluster.getFileSystem();

      assertTrue(hdfs instanceof DistributedFileSystem);

      // A tale of three directories...
      Path sbSet = new Path("/Housemartins");
      Path sbNotSpecified = new Path("/INXS");
      Path sbSetOff = new Path("/Easyworld");

      for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
        hdfs.mkdirs(p);

      // Two directories have their sticky bits set explicitly...
      hdfs.setPermission(sbSet, new FsPermission((short) 01777));
      hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));

      cluster.shutdown();

      // Start file system up again, reusing the existing name/data dirs
      // (third argument 'format' is false) so the namespace is reloaded.
      cluster = new MiniDFSCluster(conf, 4, false, null);
      hdfs = cluster.getFileSystem();

      assertTrue(hdfs.exists(sbSet));
      assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());

      assertTrue(hdfs.exists(sbNotSpecified));
      assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
          .getStickyBit());

      assertTrue(hdfs.exists(sbSetOff));
      assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());

    } finally {
      if (cluster != null)
        cluster.shutdown();
    }
  }

  /**
   * Create a new configuration for the specified user and return a filesystem
   * accessed by that user
   */
  // NOTE(review): the hdfs parameter is never used; it is kept only for
  // call-site symmetry. Consider removing it.
  static private FileSystem logonAs(UnixUserGroupInformation user,
      Configuration conf, FileSystem hdfs) throws IOException {
    Configuration conf2 = new Configuration(conf);
    UnixUserGroupInformation.saveToConf(conf2,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);

    return FileSystem.get(conf2);
  }

  /**
   * Write a quick file to the specified file system at specified path
   */
  static private void writeFile(FileSystem hdfs, Path p) throws IOException {
    FSDataOutputStream o = hdfs.create(p);
    o.write("some file contents".getBytes());
    o.close();
  }
}

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java?rev=734590&r1=734589&r2=734590&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java Wed Jan 14 17:36:20
2009
@@ -442,7 +442,6 @@
     }
   }
   
-
   public void testURIPaths() throws Exception {
     Configuration srcConf = new Configuration();
     Configuration dstConf = new Configuration();
@@ -758,18 +757,15 @@
      fs.delete(dir, true);
      fs.mkdirs(dir);
 
-     runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
-     assertEquals("rwxrw----",
-                  fs.getFileStatus(dir).getPermission().toString());
-
+     confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
+                             /* Should give */ "rwxrw----", fs, shell, dir);
+     
      //create an empty file
      Path file = new Path(chmodDir, "file");
      TestDFSShell.writeFile(fs, file);
 
      //test octal mode
-     runCmd(shell, "-chmod", "644", file.toString());
-     assertEquals("rw-r--r--",
-                  fs.getFileStatus(file).getPermission().toString());
+     confirmPermissionChange( "644", "rw-r--r--", fs, shell, file);
 
      //test recursive
      runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
@@ -777,8 +773,28 @@
                   fs.getFileStatus(dir).getPermission().toString()); 
      assertEquals("rw-rw-rw-",
                   fs.getFileStatus(file).getPermission().toString());
+
+     // test sticky bit on directories
+     Path dir2 = new Path(dir, "stickybit" );
+     fs.mkdirs(dir2 );
      
-     fs.delete(dir, true);     
+     assertEquals("rwxr-xr-x", fs.getFileStatus(dir2).getPermission()
+         .toString());
+     
+     confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);
+
+     confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);
+
+     confirmPermissionChange("=t", "--------T", fs, shell, dir2);
+
+     confirmPermissionChange("0000", "---------", fs, shell, dir2);
+
+     confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);
+
+     confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);
+
+     fs.delete(dir, true);
+     fs.delete(dir2, true);
     } finally {
       try {
         fs.close();
@@ -786,7 +802,16 @@
       } catch (IOException ignored) {}
     }
   }
-  
+
+  // Apply a new permission to a path and confirm that the new permission
+  // is the one you were expecting
+  private void confirmPermissionChange(String toApply, String expected,
+      FileSystem fs, FsShell shell, Path dir2) throws IOException {
+    runCmd(shell, "-chmod", toApply, dir2.toString());
+    
+    assertEquals(expected, fs.getFileStatus(dir2).getPermission().toString());
+  }
+   
   private void confirmOwner(String owner, String group, 
                             FileSystem fs, Path... paths) throws IOException {
     for(Path path : paths) {
@@ -820,7 +845,7 @@
     shell.setConf(conf);
     fs = cluster.getFileSystem();
     
-    /* For dfs, I am the super user and I can change ower of any file to
+    /* For dfs, I am the super user and I can change owner of any file to
      * anything. "-R" option is already tested by chmod test above.
      */
     



Mime
View raw message