hadoop-hdfs-commits mailing list archives

From: cdoug...@apache.org
Subject: svn commit: r931356 - in /hadoop/hdfs/trunk: ./ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/security/ src/test/unit/or...
Date: Tue, 06 Apr 2010 22:32:14 GMT
Author: cdouglas
Date: Tue Apr  6 22:32:14 2010
New Revision: 931356

URL: http://svn.apache.org/viewvc?rev=931356&view=rev
Log:
HDFS-997. Allow datanode storage directory permissions to be configurable. Contributed by Luke Lu
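
The new key is read once at datanode startup (see the DataNode.java hunk below). Here is a hedged sketch of how a test or embedded setup might override it programmatically; the class name and the "700" value are illustrative, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DataDirPermExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Tighten storage directories to owner-only access (illustrative).
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "700");
        // Mirrors how DataNode.makeInstance parses the key in this patch.
        FsPermission perm = new FsPermission(
            conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                     DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
        System.out.println("data dir permission: " + perm); // rwx------
      }
    }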

Added:
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/hdfs-default.xml
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Apr  6 22:32:14 2010
@@ -122,6 +122,9 @@ Trunk (unreleased changes)
     HDFS-1024. SecondaryNameNode verifies size of fsimage and edits file.
     (Dmytro Molkov via dhruba)
 
+    HDFS-997. Allow datanode storage directory permissions to be configurable.
+    (Luke Lu via cdouglas)
+
   OPTIMIZATIONS
 
     HDFS-946. NameNode should not return full path name when listing a

Modified: hadoop/hdfs/trunk/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/hdfs-default.xml?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/trunk/src/java/hdfs-default.xml Tue Apr  6 22:32:14 2010
@@ -246,6 +246,14 @@ creations/deletions), or "all".</descrip
 </property>
 
 <property>
+  <name>dfs.datanode.data.dir.perm</name>
+  <value>755</value>
+  <description>Permissions for the directories on the local filesystem where
+  the DFS data node stores its blocks. The permissions can either be octal or
+  symbolic.</description>
+</property>
+
+<property>
   <name>dfs.replication</name>
   <value>3</value>
   <description>Default block replication. 
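
The new property's description allows both octal and symbolic values. A hedged sketch of the two forms through the FsPermission(String) constructor this patch relies on; the symbolic syntax shown ("u=rwx,g=rx,o=rx") is an assumption based on that description, since only octal values are exercised in this commit:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermFormsExample {
      public static void main(String[] args) {
        // Octal form, matching the default value above.
        FsPermission octal = new FsPermission("755");
        // Symbolic form; syntax assumed per "octal or symbolic" above.
        FsPermission symbolic = new FsPermission("u=rwx,g=rx,o=rx");
        // Both should denote rwxr-xr-x.
        System.out.println(octal + " vs " + symbolic);
      }
    }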

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Apr  6 22:32:14 2010
@@ -115,7 +115,7 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
   public static final String  DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
 
-  //Code in hdfs is not updated to use these keys.
+  // Much code in hdfs is not yet updated to use these keys.
   public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
   public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
@@ -126,6 +126,8 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000;
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final String  DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:50010";
+  public static final String  DFS_DATANODE_DATA_DIR_PERMISSION_KEY = "dfs.datanode.data.dir.perm";
+  public static final String  DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT = "755";
   public static final String  DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY = "dfs.datanode.directoryscan.interval";
   public static final int     DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT = 21600;
   public static final String  DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY = "dfs.datanode.directoryscan.threads";

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Apr  6 22:32:14 2010
@@ -46,6 +46,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -1423,26 +1427,41 @@ public class DataNode extends Configured
    */
   static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf)
     throws IOException {
+    LocalFileSystem localFS = FileSystem.getLocal(conf);
+    FsPermission permission = new FsPermission(
+        conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
+                 DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
+    ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);
+
+    if (dirs.size() > 0) {
+      return new DataNode(conf, dirs);
+    }
+    LOG.error("All directories in "
+        + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + " are invalid.");
+    return null;
+  }
+
+  // DataNode ctor expects AbstractList instead of List or Collection...
+  static ArrayList<File> getDataDirsFromURIs(Collection<URI> dataDirs,
+      LocalFileSystem localFS, FsPermission permission) {
     ArrayList<File> dirs = new ArrayList<File>();
-    for(URI dirURI : dataDirs) {
-      if(! "file".equalsIgnoreCase(dirURI.getScheme())) {
-        LOG.warn("Unsupported URI schema in " + dirURI  + ". Ignoring ...");
+    for (URI dirURI : dataDirs) {
+      if (!"file".equalsIgnoreCase(dirURI.getScheme())) {
+        LOG.warn("Unsupported URI schema in " + dirURI + ". Ignoring ...");
         continue;
       }
+      // drop any (illegal) authority in the URI for backwards compatibility
       File data = new File(dirURI.getPath());
       try {
-        DiskChecker.checkDir(data);
+        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
         dirs.add(data);
-      } catch(DiskErrorException e) {
-        LOG.warn("Invalid directory in "
-            + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": " + e.getMessage());
+      } catch (IOException e) {
+        LOG.warn("Invalid directory in: "
+                 + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
+                 + e.getMessage());
       }
     }
-    if (dirs.size() > 0) 
-      return new DataNode(conf, dirs);
-    LOG.error("All directories in "
-        + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + " are invalid.");
-    return null;
+    return dirs;
   }
 
   @Override
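
Directory validation now goes through the three-argument DiskChecker.checkDir(LocalFileSystem, Path, FsPermission) rather than the File-based overload. As used here, it creates a missing directory, reapplies the configured permission when the on-disk permission differs, and throws when the directory is unusable. A standalone hedged sketch; the /tmp path is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.util.DiskChecker;

    public class CheckDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalFileSystem localFS = FileSystem.getLocal(conf);
        // Create (if needed) and verify a local dir with 755 permissions.
        DiskChecker.checkDir(localFS, new Path("/tmp/dn-data-example"),
                             new FsPermission("755"));
      }
    }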

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Tue Apr  6 22:32:14 2010
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -29,9 +28,9 @@ import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -41,7 +40,6 @@ import org.apache.hadoop.hdfs.security.B
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
 
 /** Test if a datanode can correctly handle errors during block read/write*/
 public class TestDiskError extends TestCase {
@@ -153,4 +151,33 @@ public class TestDiskError extends TestC
       cluster.shutdown();
     }
   }
+
+  public void testLocalDirs() throws Exception {
+    Configuration conf = new Configuration();
+    final String permStr = "755";
+    FsPermission expected = new FsPermission(permStr);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, permStr);
+    MiniDFSCluster cluster = null;
+
+    try {
+      // Start the cluster
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      // Check permissions on directories in 'dfs.datanode.data.dir'
+      FileSystem localFS = FileSystem.getLocal(conf);
+      String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+      for (String dir : dataDirs) {
+        Path dataDir = new Path(dir);
+        FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
+        assertEquals("Permission for dir: " + dataDir + ", is " + actual +
+                         ", while expected is " + expected,
+                     expected, actual);
+      }
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java?rev=931356&r1=931355&r2=931356&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/security/TestPermission.java Tue Apr  6 22:32:14 2010
@@ -145,7 +145,7 @@ public class TestPermission extends Test
       RAN.nextBytes(data);
       out.write(data);
       out.close();
-      nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0700));
+      nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));
 
       // following read is legal
       byte dataIn[] = new byte[FILE_LEN];
@@ -177,7 +177,7 @@ public class TestPermission extends Test
       assertTrue(!canOpen(userfs, CHILD_FILE1));
 
       nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755));
-      nnfs.setPermission(CHILD_DIR1, new FsPermission((short)0777));
+      nnfs.setPermission(CHILD_DIR1, new FsPermission("777"));
       nnfs.setPermission(new Path("/"), new FsPermission((short)0777));
       final Path RENAME_PATH = new Path("/foo/bar");
       userfs.mkdirs(RENAME_PATH);
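
The switch from the short-based constructor to the string form is behavior-preserving: "700" parses to the same bits as (short)0700. A hedged standalone check of that equivalence:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class FsPermissionEquivalence {
      public static void main(String[] args) {
        // Both denote rwx------; prints true.
        System.out.println(
            new FsPermission("700").equals(new FsPermission((short) 0700)));
      }
    }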

Added: hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java?rev=931356&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java (added)
+++ hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java Tue Apr  6 22:32:14 2010
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.*;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+import static org.apache.hadoop.test.MockitoMaker.*;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+public class TestDataDirs {
+
+  @Test public void testGetDataDirsFromURIs() throws Throwable {
+    File localDir = make(stub(File.class).returning(true).from.exists());
+    when(localDir.mkdir()).thenReturn(true);
+    FsPermission normalPerm = new FsPermission("755");
+    FsPermission badPerm = new FsPermission("000");
+    FileStatus stat = make(stub(FileStatus.class)
+        .returning(normalPerm, normalPerm, badPerm).from.getPermission());
+    when(stat.isDir()).thenReturn(true);
+    LocalFileSystem fs = make(stub(LocalFileSystem.class)
+        .returning(stat).from.getFileStatus(any(Path.class)));
+    when(fs.pathToFile(any(Path.class))).thenReturn(localDir);
+    Collection<URI> uris = Arrays.asList(new URI("file:/p1/"),
+        new URI("file:/p2/"), new URI("file:/p3/"));
+
+    List<File> dirs = DataNode.getDataDirsFromURIs(uris, fs, normalPerm);
+
+    verify(fs, times(2)).setPermission(any(Path.class), eq(normalPerm));
+    verify(fs, times(6)).getFileStatus(any(Path.class));
+    assertEquals("number of valid data dirs", 1, dirs.size());
+  }
+}
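
The make/stub idiom from org.apache.hadoop.test.MockitoMaker used above is shorthand for a plain Mockito mock-and-when; the first line of the test is roughly equivalent to this hedged sketch:

    import java.io.File;
    import static org.mockito.Mockito.*;

    public class MockitoMakerEquivalent {
      static File stubbedDir() {
        // Plain-Mockito form of:
        //   make(stub(File.class).returning(true).from.exists())
        File localDir = mock(File.class);
        when(localDir.exists()).thenReturn(true);
        return localDir;
      }
    }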


