hadoop-common-commits mailing list archives

From sur...@apache.org
Subject svn commit: r1387811 - in /hadoop/common/branches/branch-1-win: ./ src/core/org/apache/hadoop/fs/ src/core/org/apache/hadoop/io/ src/core/org/apache/hadoop/util/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/mapred/org/apache/hadoop/mapred/ src/...
Date Wed, 19 Sep 2012 23:15:44 GMT
Author: suresh
Date: Wed Sep 19 23:15:43 2012
New Revision: 1387811

URL: http://svn.apache.org/viewvc?rev=1387811&view=rev
Log:
HADOOP-8457. Address file ownership issue for users in Admin group on Windows. Contributed
by Ivan Mitic.
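
The core of the change is the new FileStatus.isOwnedByUser(user, groups) check, used below in
place of direct owner-string comparisons. A minimal caller-side sketch, assuming a FileSystem
"fs" and a Path "path" are in scope (the real call sites are in the SecureIOUtils, JobTracker
and JobSubmissionFiles hunks of this commit):

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    FileStatus status = fs.getFileStatus(path);
    // Passes if ugi is the owner, or (on Windows) if the file is owned by the
    // Administrators group and ugi is a member of that group.
    if (!status.isOwnedByUser(ugi.getShortUserName(), ugi.getGroupNames())) {
      throw new IOException(path + " is not owned by " + ugi.getShortUserName());
    }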

Modified:
    hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileStatus.java
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileUtil.java
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/io/SecureIOUtils.java
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/DiskChecker.java
    hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/Shell.java
    hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapred/JobTracker.java
    hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
    hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java

Modified: hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt (original)
+++ hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt Wed Sep 19 23:15:43 2012
@@ -127,3 +127,6 @@ BUG FIXES
 
     MAPREDUCE-4564. Fix issues with timing out shell processes. (Bikas Saha
     via acmurthy)
+
+    HADOOP-8457. Address file ownership issue for users in Admin group on
+    windows. (Ivan Mitic via suresh)

Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileStatus.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileStatus.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileStatus.java Wed Sep 19 23:15:43 2012
@@ -148,6 +148,18 @@ public class FileStatus implements Writa
     return path;
   }
 
+  /**
+   * Checks if the given user is an owner of this file.
+   * @return true if yes, false otherwise
+   */
+  public boolean isOwnedByUser(String user, String [] userGroups) {
+    if (user == null) {
+      throw new IllegalArgumentException(
+          "user argument is null");
+    }
+    return owner.equals(user);
+  }
+
   /* These are provided so that these values could be loaded lazily 
    * by a filesystem (e.g. local file system).
    */

Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileUtil.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileUtil.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/FileUtil.java Wed Sep 19 23:15:43 2012
@@ -698,23 +698,14 @@ public class FileUtil {
    * @param recursive true, if permissions should be changed recursively
    * @return the exit code from the command.
    * @throws IOException
-   * @throws InterruptedException
    */
   public static int chmod(String filename, String perm, boolean recursive)
                             throws IOException {
-    if (Shell.DISABLEWINDOWS_TEMPORARILY) {
-      return 0;
-    }
-
-    StringBuffer cmdBuf = new StringBuffer();
-    cmdBuf.append("chmod ");
-    if (recursive) {
-      cmdBuf.append("-R ");
-    }
-    cmdBuf.append(perm).append(" ");
-    cmdBuf.append(filename);
-    String[] shellCmd = {"bash", "-c" ,cmdBuf.toString()};
-    ShellCommandExecutor shExec = new ShellCommandExecutor(shellCmd);
+    String [] cmd = Shell.getSetPermissionCommand(perm, recursive);
+    String[] args = new String[cmd.length + 1];
+    System.arraycopy(cmd, 0, args, 0, cmd.length);
+    args[cmd.length] = filename;
+    ShellCommandExecutor shExec = new ShellCommandExecutor(args);
     try {
       shExec.execute();
     }catch(IOException e) {
@@ -790,7 +781,7 @@ public class FileUtil {
       NativeIO.chmod(f.getCanonicalPath(), permission.toShort());
     } else {
       execCommand(f, Shell.getSetPermissionCommand(
-                  String.format("%04o", permission.toShort())));
+                  String.format("%04o", permission.toShort()), false));
     }
   }
   

Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java Wed Sep 19 23:15:43 2012
@@ -464,6 +464,38 @@ public class RawLocalFileSystem extends 
       return super.getGroup();
     }
 
+    @Override
+    public boolean isOwnedByUser(String user, String [] userGroups) {
+      if (user == null) {
+        throw new IllegalArgumentException(
+            "user argument is null");
+      }
+      if (!isPermissionLoaded()) {
+        loadPermissionInfo();
+      }
+
+      String owner = super.getOwner();
+      boolean success = owner.equals(user);
+
+      if (!success && Shell.WINDOWS && userGroups != null) {
+        final String AdminsGroupString = "Administrators";
+
+        // On Windows Server 2003 and later, if a file or a directory is
+        // created by users in the Administrators group, the file owner will be
+        // the Administrators group instead of to the actual user. Since it
+        // would be technically challenging to go against the OS behavior
+        // and update all such cases by explicitly setting the ownership on
+        // Windows (and would have some performance implications), we are
+        // following the OS model. Specifically, if a given user is a member of
+        // the Administrators group and a file is owned by Administrators
+        // group, isOwnedByUser will return true.
+        success = owner.equals(AdminsGroupString)
+            && Arrays.asList(userGroups).contains(AdminsGroupString);
+      }
+
+      return success;
+    }
+
     /// loads permissions, owner, and group from `ls -ld`
     private void loadPermissionInfo() {
       IOException e = null;

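For reference, the Windows special case above can be read as the standalone predicate below; a
hypothetical helper written only to illustrate the patch's logic, not part of the commit:

    // True when the file is owned by the "Administrators" group and the given user
    // is a member of that group, matching the OS ownership model described above.
    static boolean ownedViaAdministrators(String owner, String[] userGroups) {
      final String admins = "Administrators";
      return owner.equals(admins)
          && userGroups != null
          && Arrays.asList(userGroups).contains(admins);
    }
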
Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/io/SecureIOUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/io/SecureIOUtils.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/io/SecureIOUtils.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/io/SecureIOUtils.java Wed Sep 19 23:15:43 2012
@@ -105,10 +105,7 @@ public class SecureIOUtils {
       return fis;
     }
     if (skipSecurity) {
-      // Subject to race conditions but this is the best we can do
-      FileStatus status =
-        rawFilesystem.getFileStatus(new Path(f.getAbsolutePath()));
-      checkStat(f, status.getOwner(), expectedOwner);
+      checkStatFileSystem(f, expectedOwner);
       return fis;
     }
 
@@ -116,6 +113,7 @@ public class SecureIOUtils {
     try {
       String owner = NativeIO.getOwner(fis.getFD());
       checkStat(f, owner, expectedOwner);
+
       success = true;
       return fis;
     } finally {
@@ -173,8 +171,6 @@ public class SecureIOUtils {
   }
 
  private static void checkStat(File f, String owner, String expectedOwner) throws IOException {
-    if (Shell.DISABLEWINDOWS_TEMPORARILY)
-      return;
     if (expectedOwner != null &&
         !expectedOwner.equals(owner)) {
       throw new IOException(
@@ -183,6 +179,25 @@ public class SecureIOUtils {
     }
   }
 
+  private static void checkStatFileSystem(File f, String expectedOwner)
+  throws IOException {
+    // Subject to race conditions but this is the best we can do
+    FileStatus status =
+      rawFilesystem.getFileStatus(new Path(f.getAbsolutePath()));
+    // Create ugi and check file ownership through isOwnedByUser
+    // as there are some specific OS dependent considerations for
+    // checking the ownership.
+    UserGroupInformation ugi =
+      UserGroupInformation.createRemoteUser(expectedOwner);
+
+    if (expectedOwner != null &&
+        !status.isOwnedByUser(ugi.getShortUserName(), ugi.getGroupNames())) {
+      throw new IOException(
+        "Owner '" + status.getOwner() + "' for path " + f + " did not match " +
+        "expected owner '" + expectedOwner + "'");
+    }
+  }
+
   /**
    * Signals that an attempt to create a file at a given pathname has failed
    * because another file already existed at that path.

Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/DiskChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/DiskChecker.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/DiskChecker.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/DiskChecker.java Wed Sep 19 23:15:43 2012
@@ -103,8 +103,6 @@ public class DiskChecker {
                                      FsPermission expected, FsPermission actual) 
   throws IOException {
     // Check for permissions
-    if (org.apache.hadoop.util.Shell.DISABLEWINDOWS_TEMPORARILY)
-      return;
     if (!actual.equals(expected)) {
       throw new IOException("Incorrect permission for " + dir + 
                             ", expected: " + expected + ", while actual: " + 

Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/Shell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/Shell.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/Shell.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/util/Shell.java Wed Sep 19 23:15:43 2012
@@ -81,11 +81,16 @@ abstract public class Shell {
   }
 
   /** Return a command to set permission */
-  public static String[] getSetPermissionCommand(String perm) {
-    return (WINDOWS) ? new String[] { WINUTILS, "chmod", perm }
-                     : new String[] { "chmod", perm };
+  public static String[] getSetPermissionCommand(String perm, boolean recursive) {
+    if (recursive) {
+      return (WINDOWS) ? new String[] { WINUTILS, "chmod", "-R", perm }
+                         : new String[] { "chmod", "-R", perm };
+    } else {
+      return (WINDOWS) ? new String[] { WINUTILS, "chmod", perm }
+                       : new String[] { "chmod", perm };
+    }
   }
-  
+
   /** Return a regular expression string that match environment variables */
   public static String getEnvironmentVariableRegex() {
     return (WINDOWS) ? "%([A-Za-z_][A-Za-z0-9_]*?)%" :

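A sketch of how a caller composes the new two-argument getSetPermissionCommand with a target
path, mirroring the FileUtil.chmod hunk earlier in this commit; the mode and path literals are
placeholders:

    // On Unix this yields {"chmod", "-R", "755"}; on Windows, {WINUTILS, "chmod", "-R", "755"}.
    String[] cmd = Shell.getSetPermissionCommand("755", true);
    String[] args = Arrays.copyOf(cmd, cmd.length + 1);
    args[cmd.length] = "/tmp/data";  // target file or directory
    new ShellCommandExecutor(args).execute();  // throws an IOException on a non-zero exit code
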
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Sep 19 23:15:43 2012
@@ -247,7 +247,12 @@ public class DataNode extends Configured
   public static final String DATA_DIR_KEY = "dfs.data.dir";
   public final static String DATA_DIR_PERMISSION_KEY = 
     "dfs.datanode.data.dir.perm";
-  private static final String DEFAULT_DATA_DIR_PERMISSION = "755";
+
+  /** 
+   * Default permissions set by the DataNode if DATA_DIR_PERMISSION_KEY
+   * configuration is not set
+   */
+  public static final String DEFAULT_DATA_DIR_PERMISSION = "755";
 
   // Thresholds for when we start to log when a block report is
   // taking a long time to generate. Under heavy disk load and

Modified: hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapred/JobTracker.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapred/JobTracker.java Wed Sep 19 23:15:43 2012
@@ -2362,8 +2362,9 @@ public class JobTracker implements MRCon
     while (!Thread.currentThread().isInterrupted()) {
       try {
         // if we haven't contacted the namenode go ahead and do it
+        UserGroupInformation mrOwner = getMROwner();
         if (fs == null) {
-          fs = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
+          fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
             public FileSystem run() throws IOException {
               return FileSystem.get(conf);
           }});
@@ -2375,10 +2376,10 @@ public class JobTracker implements MRCon
         }
         try {
           FileStatus systemDirStatus = fs.getFileStatus(systemDir);
-          if (!systemDirStatus.getOwner().equals(
-              getMROwner().getShortUserName())) {
+          if (!systemDirStatus.isOwnedByUser(
+                   mrOwner.getShortUserName(), mrOwner.getGroupNames())) {
             throw new AccessControlException("The systemdir " + systemDir +
-                " is not owned by " + getMROwner().getShortUserName());
+                " is not owned by " + mrOwner.getShortUserName());
           }
           if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
             LOG.warn("Incorrect permissions on " + systemDir +
@@ -2411,12 +2412,6 @@ public class JobTracker implements MRCon
         fs.delete(systemDir, true);
         if (FileSystem.mkdirs(fs, systemDir, 
             new FsPermission(SYSTEM_DIR_PERMISSION))) {
-          if (Shell.WINDOWS) {
-            // Explicitly set ownership on Windows, as in some scenarios
-            // Administrators group would end up being the owner what is
-            // currently not supported by the Hadoop security model.
-            fs.setOwner(systemDir, getMROwner().getShortUserName(), null);
-          }
           break;
         }
         LOG.error("Mkdirs failed to create " + systemDir);

Modified: hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapreduce/JobSubmissionFiles.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapreduce/JobSubmissionFiles.java (original)
+++ hadoop/common/branches/branch-1-win/src/mapred/org/apache/hadoop/mapreduce/JobSubmissionFiles.java Wed Sep 19 23:15:43 2012
@@ -100,31 +100,22 @@ public class JobSubmissionFiles {
     String currentUser;
     UserGroupInformation ugi = UserGroupInformation.getLoginUser();
     realUser = ugi.getShortUserName();
-    currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
+    UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
+    currentUser = currentUgi.getShortUserName();
     if (fs.exists(stagingArea)) {
       FileStatus fsStatus = fs.getFileStatus(stagingArea);
-      String owner = fsStatus.getOwner();
-      if (!(owner.equals(currentUser) || owner.equals(realUser)) || 
-          !fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
+      if (!(fsStatus.isOwnedByUser(currentUser, currentUgi.getGroupNames())
+            || fsStatus.isOwnedByUser(realUser, ugi.getGroupNames()))
+          || !fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
          throw new IOException("The ownership/permissions on the staging " +
                       "directory " + stagingArea + " is not as expected. " + 
-                      "It is owned by " + owner + " and permissions are "+ 
+                      "It is owned by " + fsStatus.getOwner() + " and permissions are "+
                       fsStatus.getPermission() + ". The directory must " +
                       "be owned by the submitter " + currentUser + " or " +
                       "by " + realUser + " and permissions must be rwx------");
       }
     } else {
-      if (fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION))) {
-        if (Shell.WINDOWS) {
-          // On Windows, if a file or directory is created by users in
-          // Administrators group, the file owner will be the Administrators
-          // group as opposed to the actual users. This causes problem
-          // because Hadoop security model assumes whoever created the
-          // file should be the owner. We explicitly set ownership here
-          // to fix the problem on Windows.
-          fs.setOwner(stagingArea, realUser, null);
-        }
-      }
+      fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION));
     }
     return stagingArea;
   }

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Sep 19 23:15:43 2012
@@ -32,6 +32,7 @@ import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -389,6 +390,16 @@ public class MiniDFSCluster {
           throw new IOException("Mkdirs failed to create directory for DataNode "
                                 + i + ": " + dir1 + " or " + dir2);
         }
+
+        // Set default permissions on data dirs as not all platforms have the
+        // same defaults
+        FileUtil.setPermission(dir1, new FsPermission(
+            conf.get("dfs.datanode.data.dir.perm",
+                     DataNode.DEFAULT_DATA_DIR_PERMISSION)));
+        FileUtil.setPermission(dir2, new FsPermission(
+            conf.get("dfs.datanode.data.dir.perm",
+                     DataNode.DEFAULT_DATA_DIR_PERMISSION)));
+
         dnConf.set(DataNode.DATA_DIR_KEY, dir1.getPath() + "," + dir2.getPath());
       }
       if (simulatedCapacities != null) {

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/TestDFSShell.java Wed Sep 19 23:15:43 2012
@@ -806,11 +806,7 @@ public class TestDFSShell extends TestCa
   
   public void testFilePermissions() throws IOException {
     Configuration conf = new Configuration();    
-    if(Shell.WINDOWS) {
-      // windows does not match the expected security semantics
-      return;
-    }
-    
+
     //test chmod on local fs
     FileSystem fs = FileSystem.getLocal(conf);
     testChmod(conf, fs, 
@@ -1146,10 +1142,6 @@ public class TestDFSShell extends TestCa
   }
 
   public void testRemoteException() throws Exception {
-    if(Shell.WINDOWS) {
-      // windows does not match the expected security semantics
-      return;
-    }
     UserGroupInformation tmpUGI = 
       UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
     MiniDFSCluster dfs = null;
@@ -1256,10 +1248,6 @@ public class TestDFSShell extends TestCa
   }
 
   public void testLsr() throws Exception {
-    if(Shell.WINDOWS) {
-      // windows does not match the expected security semantics
-      return;
-    }
     final Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1387811&r1=1387810&r2=1387811&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed Sep 19 23:15:43 2012
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
@@ -44,6 +45,7 @@ import static org.apache.hadoop.hdfs.ser
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -74,6 +76,8 @@ public class UpgradeUtilities {
   private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
   // A checksum of the contents in datanodeStorage directory
   private static long datanodeStorageChecksum;
+  // Default permissions used for data directories
+  private static String defaultPermissions;
   
   /**
    * Initialize the data structures used by this class.  
@@ -86,10 +90,12 @@ public class UpgradeUtilities {
    * block files).  This can be a lengthy operation.
    */
   public static void initialize() throws Exception {
-    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
     Configuration config = new Configuration();
     config.set("dfs.name.dir", namenodeStorage.toString());
     config.set("dfs.data.dir", datanodeStorage.toString());
+    defaultPermissions = config.get("dfs.datanode.data.dir.perm",
+                                    DataNode.DEFAULT_DATA_DIR_PERMISSION);
+    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
     MiniDFSCluster cluster = null;
     try {
       // format data-node
@@ -178,7 +184,9 @@ public class UpgradeUtilities {
       if (dir.exists()) {
         FileUtil.fullyDelete(dir);
       }
-      dir.mkdirs();
+      if (dir.mkdirs()) {
+        FileUtil.setPermission(dir, new FsPermission(defaultPermissions));
+      }
     }
   }
   
@@ -264,9 +272,12 @@ public class UpgradeUtilities {
  public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
     File[] retVal = new File[parents.length];
     for (int i = 0; i < parents.length; i++) {
+      File parent = new File(parents[i]);
       File newDir = new File(parents[i], dirName);
+      Configuration conf = new Configuration();
       createEmptyDirs(new String[] {newDir.toString()});
-      LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+      FileUtil.setPermission(parent, new FsPermission(defaultPermissions));
+      LocalFileSystem localFS = FileSystem.getLocal(conf);
       switch (nodeType) {
       case NAME_NODE:
         localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),


