hadoop-hdfs-commits mailing list archives

From: da...@apache.org
Subject: svn commit: r1366074 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/
Date: Thu, 26 Jul 2012 16:33:04 GMT
Author: daryn
Date: Thu Jul 26 16:33:03 2012
New Revision: 1366074

URL: http://svn.apache.org/viewvc?rev=1366074&view=rev
Log:
svn merge -r 1365800:1365817 FIXES: HDFS-3626. Creating file with invalid path can corrupt edit log. Contributed by Todd Lipcon.
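
For context, a minimal sketch of the failure mode this change guards against, using the same MiniDFSCluster and NamenodeProtocols calls as the tests added below (illustrative only, not part of the commit): a client that bypasses client-side path normalization and sends a non-canonical path straight over the NameNode RPC could previously get that path recorded in the edit log, leaving edits the NameNode could not replay on restart. With this patch the RPC is rejected up front:

    // Sketch only; mirrors TestDFSMkdirs#testMkdirRpcNonCanonicalPath below.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(0).build();
    try {
      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
      // Before HDFS-3626 this was accepted and logged verbatim;
      // with this patch it throws InvalidPathException instead.
      nnrpc.mkdirs("//foo", new FsPermission((short)0755), true);
    } finally {
      cluster.shutdown();
    }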

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 26 16:33:03 2012
@@ -81,6 +81,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-3688. Namenode loses datanode hostname if datanode re-registers
     (Jason Lowe via daryn)
 
+    HDFS-3626. Creating file with invalid path can corrupt edit log (todd)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Jul 26 16:33:03 2012
@@ -64,6 +64,9 @@ import org.apache.hadoop.ipc.RemoteExcep
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 @InterfaceAudience.Private
 public class DFSUtil {
@@ -107,7 +110,7 @@ public class DFSUtil {
   
   /**
    * Whether the pathname is valid.  Currently prohibits relative paths, 
-   * and names which contain a ":" or "/" 
+   * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
       
@@ -117,15 +120,22 @@ public class DFSUtil {
     }
       
     // Check for ".." "." ":" "/"
-    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
-    while(tokens.hasMoreTokens()) {
-      String element = tokens.nextToken();
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
       if (element.equals("..") || 
           element.equals(".")  ||
           (element.indexOf(":") >= 0)  ||
           (element.indexOf("/") >= 0)) {
         return false;
       }
+      
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
     }
     return true;
   }
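
Illustrative expectations for the updated check (the first four lines match the assertions added to TestDFSUtil below; the last two follow from the javadoc above and are my reading, not assertions in this patch):

    DFSUtil.isValidName("/");            // true  -- root is allowed
    DFSUtil.isValidName("/bar/");        // true  -- trailing slash is allowed
    DFSUtil.isValidName("/foo//bar");    // false -- empty component in the middle
    DFSUtil.isValidName("/foo/../bar");  // false -- ".." component
    DFSUtil.isValidName("/foo/a:b");     // false -- ":" inside a component
    DFSUtil.isValidName("foo/bar");      // false -- relative paths are rejected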

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Jul 26 16:33:03 2012
@@ -249,8 +249,15 @@ public class FSDirectory implements Clos
 
     // Always do an implicit mkdirs for parent directory tree.
     long modTime = now();
-    if (!mkdirs(new Path(path).getParent().toString(), permissions, true,
-        modTime)) {
+    
+    Path parent = new Path(path).getParent();
+    if (parent == null) {
+      // Trying to add "/" as a file - this path has no
+      // parent -- avoids an NPE below.
+      return null;
+    }
+    
+    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
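
The null check above matters because Hadoop's Path.getParent() returns null for the root path, so a request to create "/" as a file would otherwise dereference null. A minimal illustration (not part of the commit):

    Path root = new Path("/");
    // getParent() is null for the root, hence the early return above.
    assert root.getParent() == null;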

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java Thu Jul 26 16:33:03 2012
@@ -19,32 +19,44 @@ package org.apache.hadoop.hdfs;
 
 import junit.framework.TestCase;
 import java.io.*;
+import static org.junit.Assert.*;
+
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
 
 /**
- * This class tests that the DFS command mkdirs cannot create subdirectories
- * from a file when passed an illegal path.  HADOOP-281.
+ * This class tests that the DFS command mkdirs only creates valid
+ * directories, and generally behaves as expected.
  */
 public class TestDFSMkdirs extends TestCase {
+  private Configuration conf = new HdfsConfiguration();
+
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+      "//test1",
+      "/test2/..",
+      "/test2//bar",
+      "/test2/../test4",
+      "/test5/."
+  };
 
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    DataOutputStream stm = fileSys.create(name);
-    stm.writeBytes("wchien");
-    stm.close();
-  }
-  
   /**
    * Tests mkdirs can create a directory that does not exist and will
-   * not create a subdirectory off a file.
+   * not create a subdirectory off a file. Regression test for HADOOP-281.
    */
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       // First create a new directory with mkdirs
@@ -55,7 +67,7 @@ public class TestDFSMkdirs extends TestC
 
       // Second, create a file in that directory.
       Path myFile = new Path("/test/mkdirs/myFile");
-      writeFile(fileSys, myFile);
+      DFSTestUtil.writeFile(fileSys, myFile, "hello world");
    
       // Third, use mkdir to create a subdirectory off of that file,
       // and check that it fails.
@@ -90,7 +102,7 @@ public class TestDFSMkdirs extends TestC
       // Create a dir when parent dir exists as a file, should fail
       IOException expectedException = null;
       String filePath = "/mkdir-file-" + System.currentTimeMillis();
-      writeFile(dfs, new Path(filePath));
+      DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
       try {
         dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
       } catch (IOException e) {
@@ -117,4 +129,29 @@ public class TestDFSMkdirs extends TestC
       cluster.shutdown();
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * rejects it.
+   */
+  @Test
+  public void testMkdirRpcNonCanonicalPath() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    try {
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+      
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        try {
+          nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
+          fail("Did not fail when called with a non-canonicalized path: "
+             + pathStr);
+        } catch (InvalidPathException ipe) {
+          // expected
+        }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Jul 26 16:33:03 2012
@@ -308,4 +308,11 @@ public class TestDFSUtil {
     assertEquals("0.0.0.0:50070", httpport);
   }
 
-}
\ No newline at end of file
+  @Test
+  public void testIsValidName() {
+    assertFalse(DFSUtil.isValidName("/foo/../bar"));
+    assertFalse(DFSUtil.isValidName("/foo//bar"));
+    assertTrue(DFSUtil.isValidName("/"));
+    assertTrue(DFSUtil.isValidName("/bar/"));
+  }
+}

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1366074&r1=1366073&r2=1366074&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Thu Jul 26 16:33:03 2012
@@ -38,6 +38,9 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.UnknownHostException;
 import java.util.EnumSet;
 
 import org.apache.commons.logging.LogFactory;
@@ -48,6 +51,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -62,7 +66,10 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
 
@@ -84,6 +91,15 @@ public class TestFileCreation extends ju
   static final int numBlocks = 2;
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
+  
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+    "//foo",
+    "///foo2",
+    "//dir//file",
+    "////test2/file",
+    "/dir/./file2",
+    "/dir/../file3"
+  };
 
   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -937,4 +953,90 @@ public class TestFileCreation extends ju
       }
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * can properly restart.
+   * 
+   * This test RPCs directly to the NN, to ensure that even an old client
+   * which passes an invalid path won't cause corrupt edits.
+   */
+  public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
+    doCreateTest(CreationMethod.DIRECT_NN_RPC);
+  }
+  
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a string object.
+   */
+  public void testCreateNonCanonicalPathAndRestartFromString()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_STRING);
+  }
+
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a URI object.
+   */
+  public void testCreateNonCanonicalPathAndRestartFromUri()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_URI);
+  }
+  
+  private static enum CreationMethod {
+    DIRECT_NN_RPC,
+    PATH_FROM_URI,
+    PATH_FROM_STRING
+  };
+  private void doCreateTest(CreationMethod method) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        System.out.println("Creating " + pathStr + " by " + method);
+        switch (method) {
+        case DIRECT_NN_RPC:
+          try {
+            nnrpc.create(pathStr, new FsPermission((short)0755), "client",
+                new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
+                true, (short)1, 128*1024*1024L);
+            fail("Should have thrown exception when creating '"
+                + pathStr + "'" + " by " + method);
+          } catch (InvalidPathException ipe) {
+            // When we create by direct NN RPC, the NN just rejects the
+            // non-canonical paths, rather than trying to normalize them.
+            // So, we expect all of them to fail. 
+          }
+          break;
+          
+        case PATH_FROM_URI:
+        case PATH_FROM_STRING:
+          // Unlike the above direct-to-NN case, we expect these to succeed,
+          // since the Path constructor should normalize the path.
+          Path p;
+          if (method == CreationMethod.PATH_FROM_URI) {
+            p = new Path(new URI(fs.getUri() + pathStr));
+          } else {
+            p = new Path(fs.getUri() + pathStr);  
+          }
+          FSDataOutputStream stm = fs.create(p);
+          IOUtils.closeStream(stm);
+          break;
+        default:
+          throw new AssertionError("bad method: " + method);
+        }
+      }
+      
+      cluster.restartNameNode();
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }
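
A usage note on the PATH_FROM_STRING and PATH_FROM_URI cases above: they are expected to succeed because the client-side Path constructor normalizes the string before the request reaches the NameNode. A small illustration of the duplicate-slash case (my sketch, not part of the commit; I have not verified how every "." and ".." form is normalized on the client side):

    // Duplicate slashes are collapsed by the Path constructor, so the
    // NameNode only ever sees the canonical form.
    Path p = new Path("/dir//file");
    System.out.println(p);   // prints /dir/file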


