hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r1346212 - in /hadoop/common/branches/branch-0.22/common: CHANGES.txt src/java/org/apache/hadoop/fs/LocalDirAllocator.java src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java
Date Tue, 05 Jun 2012 01:35:01 GMT
Author: shv
Date: Tue Jun  5 01:35:01 2012
New Revision: 1346212

URL: http://svn.apache.org/viewvc?rev=1346212&view=rev
Log:
HADOOP-7338. LocalDirAllocator improvements for MR-2178. Contributed by Benoy Antony.

Modified:
    hadoop/common/branches/branch-0.22/common/CHANGES.txt
    hadoop/common/branches/branch-0.22/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
    hadoop/common/branches/branch-0.22/common/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java

Modified: hadoop/common/branches/branch-0.22/common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.22/common/CHANGES.txt?rev=1346212&r1=1346211&r2=1346212&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.22/common/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.22/common/CHANGES.txt Tue Jun  5 01:35:01 2012
@@ -18,6 +18,11 @@ Release 0.22.1 - Unreleased
     HADOOP-7680 TestHardLink fails on Mac OS X, when gnu stat is in path.
     (Milind Bhandarkar via stevel)
 
+  SUBTASKS OF HADOOP-8357. Restore security in Hadoop 0.22 branch.
+
+    HADOOP-7338. LocalDirAllocator improvements for MR-2178.
+    (Benoy Antony via shv)
+
 Release 0.22.0 - 2011-11-29
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.22/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.22/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java?rev=1346212&r1=1346211&r2=1346212&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.22/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java (original)
+++ hadoop/common/branches/branch-0.22/common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java Tue Jun  5 01:35:01 2012
@@ -128,8 +128,26 @@ public class LocalDirAllocator {
    */
   public Path getLocalPathForWrite(String pathStr, long size, 
       Configuration conf) throws IOException {
+	return getLocalPathForWrite(pathStr, size, conf, true);
+  }
+
+  /** Get a path from the local FS. Pass size as 
+   *  SIZE_UNKNOWN if not known apriori. We
+   *  round-robin over the set of disks (via the configured dirs) and return
+   *  the first complete path which has enough space 
+   *  @param pathStr the requested path (this will be created on the first 
+   *  available disk)
+   *  @param size the size of the file that is going to be written
+   *  @param conf the Configuration object
+   *  @param checkWrite ensure that the path is writable
+   *  @return the complete path to the file on a local disk
+   *  @throws IOException
+   */
+  public Path getLocalPathForWrite(String pathStr, long size, 
+                                   Configuration conf,
+                                   boolean checkWrite) throws IOException {
     AllocatorPerContext context = obtainContext(contextCfgItemName);
-    return context.getLocalPathForWrite(pathStr, size, conf);
+    return context.getLocalPathForWrite(pathStr, size, conf, checkWrite);
   }
   
   /** Get a path from the local FS for reading. We search through all the
@@ -214,7 +232,8 @@ public class LocalDirAllocator {
     /** This method gets called everytime before any read/write to make sure
      * that any change to localDirs is reflected immediately.
      */
-    private void confChanged(Configuration conf) throws IOException {
+    private synchronized void confChanged(Configuration conf
+                                          ) throws IOException {
       String newLocalDirs = conf.get(contextCfgItemName);
       if (!newLocalDirs.equals(savedLocalDirs)) {
         localDirs = conf.getTrimmedStrings(contextCfgItemName);
@@ -252,18 +271,21 @@ public class LocalDirAllocator {
       }
     }
 
-    private Path createPath(String path) throws IOException {
+    private Path createPath(String path, 
+    		boolean checkWrite) throws IOException {
       Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
                                     path);
-      //check whether we are able to create a directory here. If the disk
-      //happens to be RDONLY we will fail
-      try {
-        DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
-        return file;
-      } catch (DiskErrorException d) {
-        LOG.warn(StringUtils.stringifyException(d));
-        return null;
+      if (checkWrite) {
+    	//check whether we are able to create a directory here. If the disk
+    	//happens to be RDONLY we will fail
+    	try {
+          DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
+    	} catch (DiskErrorException d) {
+          LOG.warn(StringUtils.stringifyException(d));
+          return null;
+    	}
       }
+      return file;
     }
 
     /**
@@ -274,17 +296,6 @@ public class LocalDirAllocator {
       return dirNumLastAccessed;
     }
     
-    /** Get a path from the local FS. This method should be used if the size of 
-     *  the file is not known a priori. 
-     *  
-     *  It will use roulette selection, picking directories
-     *  with probability proportional to their available space. 
-     */
-    public synchronized Path getLocalPathForWrite(String path, 
-        Configuration conf) throws IOException {
-      return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
-    }
-
     /** Get a path from the local FS. If size is known, we go
      *  round-robin over the set of disks (via the configured dirs) and return
      *  the first complete path which has enough space.
@@ -292,8 +303,10 @@ public class LocalDirAllocator {
      *  If size is not known, use roulette selection -- pick directories
      *  with probability proportional to their available space.
      */
-    public synchronized Path getLocalPathForWrite(String pathStr, long size, 
-        Configuration conf) throws IOException {
+    public synchronized 
+    Path getLocalPathForWrite(String pathStr, long size, 
+    	                      Configuration conf, boolean checkWrite
+    	                      ) throws IOException {
       confChanged(conf);
       int numDirs = localDirs.length;
       int numDirsSearched = 0;
@@ -325,7 +338,7 @@ public class LocalDirAllocator {
             dir++;
           }
           dirNumLastAccessed = dir;
-          returnPath = createPath(pathStr);
+          returnPath = createPath(pathStr, checkWrite);
           if (returnPath == null) {
             totalAvailable -= availableOnDisk[dir];
             availableOnDisk[dir] = 0; // skip this disk
@@ -336,7 +349,7 @@ public class LocalDirAllocator {
         while (numDirsSearched < numDirs && returnPath == null) {
           long capacity = dirDF[dirNumLastAccessed].getAvailable();
           if (capacity > size) {
-            returnPath = createPath(pathStr);
+            returnPath = createPath(pathStr, checkWrite);
           }
           dirNumLastAccessed++;
           dirNumLastAccessed = dirNumLastAccessed % numDirs; 
@@ -362,7 +375,7 @@ public class LocalDirAllocator {
         Configuration conf) throws IOException {
 
       // find an appropriate directory
-      Path path = getLocalPathForWrite(pathStr, size, conf);
+      Path path = getLocalPathForWrite(pathStr, size, conf, true);
       File dir = new File(path.getParent().toUri().getPath());
       String prefix = path.getName();
 
@@ -399,6 +412,7 @@ public class LocalDirAllocator {
       " the configured local directories");
     }
 
+
     /** We search through all the configured dirs for the file's existence
      *  and return true when we find one 
      */

Modified: hadoop/common/branches/branch-0.22/common/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.22/common/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java?rev=1346212&r1=1346211&r2=1346212&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.22/common/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java (original)
+++ hadoop/common/branches/branch-0.22/common/src/test/core/org/apache/hadoop/fs/TestLocalDirAllocator.java Tue Jun  5 01:35:01 2012
@@ -20,11 +20,12 @@ package org.apache.hadoop.fs;
 import java.io.File;
 import java.io.IOException;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Shell;
 
-import junit.framework.TestCase;
-
 /** This test LocalDirAllocator works correctly;
  * Every test case uses different buffer dirs to 
  * enforce the AllocatorPerContext initialization.
@@ -122,6 +123,44 @@ public class TestLocalDirAllocator exten
       rmBufferDirs();
     }
   }
+  
+  /** Testing the checkwrite functionality
+   * Create a buffer directory(RW) . get the paths with checkwrite on and off.
+   * Change it READONLY. get the paths with checkwrite on and off. 
+   * With checkWrite on, there should be an exception.
+   * @throws Exception
+   */
+  public void testCheckWrite() throws Exception {
+    if (isWindows) return;
+    try {
+      conf.set(CONTEXT, BUFFER_DIR[0]);
+      assertTrue(localFs.mkdirs(BUFFER_PATH[0]));
+      
+      Path pathFalse = dirAllocator.getLocalPathForWrite("test", -1, conf, false);
+      Path pathTrue = dirAllocator.getLocalPathForWrite("test", -1, conf, true);
+      
+      assertTrue ("The returned paths are different. TruePath = " + pathTrue + " , FalsePath = "+ pathFalse,pathTrue.equals(pathFalse)) ;
+      
+      //set to Read only
+      new File(BUFFER_DIR[0]).setReadOnly();
+      
+      pathFalse = null;
+      pathFalse = dirAllocator.getLocalPathForWrite("test", -1, conf, false);
+      assertTrue ("The returned paths are different after setting to readonly. TruePath = " + pathTrue + " , FalsePath = "+ pathFalse,pathTrue.equals(pathFalse)) ;
+
+      try {
+        pathTrue = dirAllocator.getLocalPathForWrite("test", -1, conf, true);
+        fail ();
+      }
+      catch (DiskErrorException dee){
+        // The exception is expected.
+      }
+    } finally {
+      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+      rmBufferDirs();
+    }
+  }
+  
   /** Two buffer dirs. Both do not exist but on a RW disk.
    * Check if tmp dirs are allocated in a round-robin 
    */



Mime
View raw message