hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r831475 [2/2] - in /hadoop/common/trunk: ./ src/java/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/fs/ftp/ src/java/org/apache/hadoop/fs/local/ src/test/core/org/apache/hadoop/fs/
Date Fri, 30 Oct 2009 22:24:23 GMT
Modified: hadoop/common/trunk/src/java/org/apache/hadoop/fs/Options.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/Options.java?rev=831475&r1=831474&r2=831475&view=diff
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/Options.java (original)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/Options.java Fri Oct 30 22:24:22 2009
@@ -33,7 +33,7 @@
     public static BlockSize blockSize(long bs) { 
       return new BlockSize(bs);
     }
-    public static BufferSize bufferSize(short bs) { 
+    public static BufferSize bufferSize(int bs) { 
       return new BufferSize(bs);
     }
     public static ReplicationFactor repFac(short rf) { 
@@ -52,7 +52,7 @@
       return new CreateParent(false);
     }
     
-    static class BlockSize extends CreateOpts {
+    public static class BlockSize extends CreateOpts {
       private final long blockSize;
       protected BlockSize(long bs) {
         if (bs <= 0) {
@@ -61,10 +61,10 @@
         }
         blockSize = bs; 
       }
-      long getValue() { return blockSize; }
+      public long getValue() { return blockSize; }
     }
     
-    static class ReplicationFactor extends CreateOpts {
+    public static class ReplicationFactor extends CreateOpts {
       private final short replication;
       protected ReplicationFactor(short rf) { 
         if (rf <= 0) {
@@ -73,22 +73,22 @@
         }
         replication = rf;
       }
-      short getValue() { return replication; }
+      public short getValue() { return replication; }
     }
     
-    static class BufferSize extends CreateOpts {
+    public static class BufferSize extends CreateOpts {
       private final int bufferSize;
-      protected BufferSize(short bs) {
+      protected BufferSize(int bs) {
         if (bs <= 0) {
           throw new IllegalArgumentException(
                         "Buffer size must be greater than 0");
         }
         bufferSize = bs; 
       }
-      int getValue() { return bufferSize; }
+      public int getValue() { return bufferSize; }
     }
     
-    static class BytesPerChecksum extends CreateOpts {
+    public static class BytesPerChecksum extends CreateOpts {
       private final int bytesPerChecksum;
       protected BytesPerChecksum(short bpc) { 
         if (bpc <= 0) {
@@ -97,10 +97,10 @@
         }
         bytesPerChecksum = bpc; 
       }
-      int getValue() { return bytesPerChecksum; }
+      public int getValue() { return bytesPerChecksum; }
     }
     
-    static class Perms extends CreateOpts {
+    public static class Perms extends CreateOpts {
       private final FsPermission permissions;
       protected Perms(FsPermission perm) { 
         if(perm == null) {
@@ -108,10 +108,10 @@
         }
         permissions = perm; 
       }
-      FsPermission getValue() { return permissions; }
+      public FsPermission getValue() { return permissions; }
     }
     
-    static class Progress extends CreateOpts {
+    public static class Progress extends CreateOpts {
       private final Progressable progress;
       protected Progress(Progressable prog) { 
         if(prog == null) {
@@ -119,14 +119,65 @@
         }
         progress = prog;
       }
-      Progressable getValue() { return progress; }
+      public Progressable getValue() { return progress; }
     }
     
-    static class CreateParent extends CreateOpts {
-      private final Boolean createParent;
+    public static class CreateParent extends CreateOpts {
+      private final boolean createParent;
       protected CreateParent(boolean createPar) {
         createParent = createPar;}
-      Boolean getValue() { return createParent; }
+      public boolean getValue() { return createParent; }
+    }
+
+    
+    /**
+     * Get an option of desired type
+     * @param theClass is the desired class of the opt
+     * @param opts - not null - at least one opt must be passed
+     * @return an opt from one of the opts of type theClass.
+     *   returns null if there isn't any
+     */
+    protected static CreateOpts getOpt(Class<? extends CreateOpts> theClass, CreateOpts ...opts) {
+      if (opts == null) {
+        throw new IllegalArgumentException("Null opt");
+      }
+      CreateOpts result = null;
+      for (int i = 0; i < opts.length; ++i) {
+        if (opts[i].getClass() == theClass) {
+          if (result != null) 
+            throw new IllegalArgumentException("multiple blocksize varargs");
+          result = opts[i];
+        }
+      }
+      return result;
+    }
+    /**
+     * set an option
+     * @param newValue  the option to be set
+     * @param opts  - the option is set into this array of opts
+     * @return updated CreateOpts[] == opts + newValue
+     */
+    protected static <T extends CreateOpts> CreateOpts[] setOpt(T newValue,
+        CreateOpts ...opts) {
+      boolean alreadyInOpts = false;
+      if (opts != null) {
+        for (int i = 0; i < opts.length; ++i) {
+          if (opts[i].getClass() == newValue.getClass()) {
+            if (alreadyInOpts) 
+              throw new IllegalArgumentException("multiple opts varargs");
+            alreadyInOpts = true;
+            opts[i] = newValue;
+          }
+        }
+      }
+      CreateOpts[] resultOpt = opts;
+      if (!alreadyInOpts) { // no newValue in opt
+        CreateOpts[] newOpts = new CreateOpts[opts.length + 1];
+        System.arraycopy(opts, 0, newOpts, 0, opts.length);
+        newOpts[opts.length] = newValue;
+        resultOpt = newOpts;
+      }
+      return resultOpt;
     }
   }
 

Modified: hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FTPFileSystemConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FTPFileSystemConfigKeys.java?rev=831475&r1=831474&r2=831475&view=diff
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FTPFileSystemConfigKeys.java (original)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FTPFileSystemConfigKeys.java Fri
Oct 30 22:24:22 2009
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ftp;
-
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-
-/** 
- * This class contains constants for configuration keys used
- * in the ftp file system. 
- *
- */
-
-public class FTPFileSystemConfigKeys extends CommonConfigurationKeys {
-  public static final String  FTP_BLOCK_SIZE_KEY = "ftp.blocksize";
-  public static final long    FTP_BLOCK_SIZE_DEFAULT = 64*1024*1024;
-  public static final String  FTP_REPLICATION_KEY = "ftp.replication";
-  public static final short   FTP_REPLICATION_DEFAULT = 1;
-  public static final String  FTP_STREAM_BUFFER_SIZE_KEY = 
-                                                    "ftp.stream-buffer-size";
-  public static final int     FTP_STREAM_BUFFER_SIZE_DEFAULT = 4096;
-  public static final String  FTP_BYTES_PER_CHECKSUM_KEY = 
-                                                    "ftp.bytes-per-checksum";
-  public static final int     FTP_BYTES_PER_CHECKSUM_DEFAULT = 512;
-  public static final String  FTP_CLIENT_WRITE_PACKET_SIZE_KEY =
-                                                    "ftp.client-write-packet-size";
-  public static final int     FTP_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-}
-  

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java Fri Oct 30 22:24:22
2009
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ftp;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FsServerDefaults;
+
+/** 
+ * This class contains constants for configuration keys used
+ * in the ftp file system.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class FtpConfigKeys extends CommonConfigurationKeys {
+  public static final String  BLOCK_SIZE_KEY = "ftp.blocksize";
+  public static final long    BLOCK_SIZE_DEFAULT = 4*1024;
+  public static final String  REPLICATION_KEY = "ftp.replication";
+  public static final short   REPLICATION_DEFAULT = 1;
+  public static final String  STREAM_BUFFER_SIZE_KEY = 
+                                                    "ftp.stream-buffer-size";
+  public static final int     STREAM_BUFFER_SIZE_DEFAULT = 1024*1024;
+  public static final String  BYTES_PER_CHECKSUM_KEY = 
+                                                    "ftp.bytes-per-checksum";
+  public static final int     BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String  CLIENT_WRITE_PACKET_SIZE_KEY =
+                                                "ftp.client-write-packet-size";
+  public static final int     CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  
+  protected static FsServerDefaults getServerDefaults() throws IOException {
+    return new FsServerDefaults(
+        BLOCK_SIZE_DEFAULT,
+        BYTES_PER_CHECKSUM_DEFAULT,
+        CLIENT_WRITE_PACKET_SIZE_DEFAULT,
+        REPLICATION_DEFAULT,
+        STREAM_BUFFER_SIZE_DEFAULT);
+  }
+}
+  

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpFs.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpFs.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/ftp/FtpFs.java Fri Oct 30 22:24:22 2009
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ftp;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.commons.net.ftp.FTP;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsServerDefaults;
+
+/**
+ * The FtpFs implementation of AbstractFileSystem.
+ * This impl delegates to the old FileSystem
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+public class FtpFs extends DelegateToFileSystem {
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
+   * 
+   * @param theUri which must be that of localFs
+   * @param conf
+   * @throws IOException
+   * @throws URISyntaxException 
+   */
+  FtpFs(final URI theUri, final Configuration conf) throws IOException,
+      URISyntaxException {
+    super(theUri, new FTPFileSystem(), conf, FsConstants.FTP_SCHEME, true);
+  }
+  
+  @Override
+  protected int getUriDefaultPort() {
+    return FTP.DEFAULT_PORT;
+  }
+  
+  @Override
+  protected FsServerDefaults getServerDefaults() throws IOException {
+    return FtpConfigKeys.getServerDefaults();
+  }
+}

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalConfigKeys.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalConfigKeys.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalConfigKeys.java Fri Oct 30
22:24:22 2009
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.local;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FsServerDefaults;
+
+/** 
+ * This class contains constants for configuration keys used
+ * in the local file system, raw local fs and checksum fs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class LocalConfigKeys extends CommonConfigurationKeys {
+  public static final String BLOCK_SIZE_KEY = "file.blocksize";
+  public static final long BLOCK_SIZE_DEFAULT = 64*1024*1024;
+  public static final String  REPLICATION_KEY = "file.replication";
+  public static final short REPLICATION_DEFAULT = 1;
+  public static final String STREAM_BUFFER_SIZE_KEY = "file.stream-buffer-size";
+  public static final int STREAM_BUFFER_SIZE_DEFAULT = 4096;
+  public static final String BYTES_PER_CHECKSUM_KEY = "file.bytes-per-checksum";
+  public static final int BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String CLIENT_WRITE_PACKET_SIZE_KEY =
+                                                "file.client-write-packet-size";
+  public static final int CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+
+  protected static FsServerDefaults getServerDefaults() throws IOException {
+    return new FsServerDefaults(
+        BLOCK_SIZE_DEFAULT,
+        BYTES_PER_CHECKSUM_DEFAULT,
+        CLIENT_WRITE_PACKET_SIZE_DEFAULT,
+        REPLICATION_DEFAULT,
+        STREAM_BUFFER_SIZE_DEFAULT);
+  }
+}
+  

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalFs.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalFs.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/LocalFs.java Fri Oct 30 22:24:22
2009
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.local;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.ChecksumFs;
+
+/**
+ * The LocalFs implementation of ChecksumFs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+public class LocalFs extends ChecksumFs {
+  LocalFs(final Configuration conf) throws IOException, URISyntaxException {
+    super(new RawLocalFs(conf));
+  }
+  
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
+   * 
+   * @param theUri which must be that of localFs
+   * @param conf
+   * @throws IOException
+   * @throws URISyntaxException 
+   */
+  LocalFs(final URI theUri, final Configuration conf) throws IOException,
+      URISyntaxException {
+    this(conf);
+  }
+}

Added: hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/RawLocalFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/RawLocalFs.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/RawLocalFs.java (added)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/local/RawLocalFs.java Fri Oct 30 22:24:22
2009
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.local;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+
+
+/**
+ * The RawLocalFs implementation of AbstractFileSystem.
+ *  This impl delegates to the old FileSystem
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
+public class RawLocalFs extends DelegateToFileSystem {
+  RawLocalFs(final Configuration conf) throws IOException, URISyntaxException {
+    this(FsConstants.LOCAL_FS_URI, conf);
+  }
+  
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
+   * 
+   * @param theUri which must be that of localFs
+   * @param conf
+   * @throws IOException
+   * @throws URISyntaxException 
+   */
+  RawLocalFs(final URI theUri, final Configuration conf) throws IOException,
+      URISyntaxException {
+    super(theUri, new RawLocalFileSystem(), conf, 
+        FsConstants.LOCAL_FS_URI.getScheme(), false);
+  }
+  
+  @Override
+  protected int getUriDefaultPort() {
+    return -1; // No default port for file:///
+  }
+  
+  @Override
+  protected FsServerDefaults getServerDefaults() throws IOException {
+    return LocalConfigKeys.getServerDefaults();
+  }
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java
(added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/FileContextPermissionBase.java
Fri Oct 30 22:24:22 2009
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * <p>
+ * A collection of permission tests for the {@link FileContext}.
+ * This test should be used for testing an instance of FileContext
+ *  that has been initialized to a specific default FileSystem such as
+ *  LocalFileSystem, HDFS, S3, etc.
+ * </p>
+ * <p>
+ * To test a given {@link FileSystem} implementation create a subclass of this
+ * test and override {@link #setUp()} to initialize the <code>fc</code> 
+ * {@link FileContext} instance variable.
+ * 
+ * Since this is a junit 4 test you can also do a single setup before 
+ * the start of any tests.
+ * E.g.
+ *     @BeforeClass   public static void clusterSetupAtBegining()
+ *     @AfterClass    public static void ClusterShutdownAtEnd()
+ * </p>
+ */
+public class FileContextPermissionBase {  
+  static final String TEST_ROOT_DIR = new Path(System.getProperty(
+      "test.build.data", "/tmp")).toString().replace(' ', '_')
+      + "/" + TestLocalFileSystemPermission.class.getSimpleName() + "_";
+  
+  protected Path getTestRootRelativePath(String pathString) {
+    return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
+  }
+  
+  private Path rootPath = null;
+  protected Path getTestRootPath() {
+    if (rootPath == null) {
+      rootPath = fc.makeQualified(new Path(TEST_ROOT_DIR));
+    }
+    return rootPath;   
+  }
+
+
+  {
+    try {
+      ((org.apache.commons.logging.impl.Log4JLogger)FileSystem.LOG).getLogger()
+      .setLevel(org.apache.log4j.Level.DEBUG);
+    }
+    catch(Exception e) {
+      System.out.println("Cannot change log level\n"
+          + StringUtils.stringifyException(e));
+    }
+  }
+  
+  static FileContext fc;
+
+  @Before
+  public void setUp() throws Exception {
+    fc.mkdir(getTestRootPath(), FileContext.DEFAULT_PERM, true);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fc.delete(getTestRootPath(), true);
+  }
+  
+
+  private Path writeFile(FileContext theFc, String name) throws IOException {
+    Path f = getTestRootRelativePath(name);
+    FSDataOutputStream stm = theFc.create(f, EnumSet.of(CreateFlag.CREATE));
+    stm.writeBytes("42\n");
+    stm.close();
+    return f;
+  }
+
+  private void cleanupFile(FileContext theFc, Path name) throws IOException {
+    Assert.assertTrue(theFc.exists(name));
+    theFc.delete(name, true);
+    Assert.assertTrue(!theFc.exists(name));
+  }
+
+  @Test
+  public void testCreatePermission() throws IOException {
+    if (Path.WINDOWS) {
+      System.out.println("Cannot run test for Windows");
+      return;
+    }
+    String filename = "foo";
+    Path f = writeFile(fc, filename);
+    doFilePermissionCheck(FileContext.DEFAULT_PERM.applyUMask(fc.getUMask()),
+                        fc.getFileStatus(f).getPermission());
+  }
+  
+  
+  @Test
+  public void testSetPermission() throws IOException {
+    if (Path.WINDOWS) {
+      System.out.println("Cannot run test for Windows");
+      return;
+    }
+
+    String filename = "foo";
+    Path f = writeFile(fc, filename);
+
+    try {
+      // create files and manipulate them.
+      FsPermission all = new FsPermission((short)0777);
+      FsPermission none = new FsPermission((short)0);
+
+      fc.setPermission(f, none);
+      doFilePermissionCheck(none, fc.getFileStatus(f).getPermission());
+
+      fc.setPermission(f, all);
+      doFilePermissionCheck(all, fc.getFileStatus(f).getPermission());
+    }
+    finally {cleanupFile(fc, f);}
+  }
+
+  @Test
+  public void testSetOwner() throws IOException {
+    if (Path.WINDOWS) {
+      System.out.println("Cannot run test for Windows");
+      return;
+    }
+
+    String filename = "bar";
+    Path f = writeFile(fc, filename);
+    List<String> groups = null;
+    try {
+      groups = getGroups();
+      System.out.println(filename + ": " + fc.getFileStatus(f).getPermission());
+    }
+    catch(IOException e) {
+      System.out.println(StringUtils.stringifyException(e));
+      System.out.println("Cannot run test");
+      return;
+    }
+    if (groups == null || groups.size() < 1) {
+      System.out.println("Cannot run test: need at least one group.  groups="
+                         + groups);
+      return;
+    }
+
+    // create files and manipulate them.
+    try {
+      String g0 = groups.get(0);
+      fc.setOwner(f, null, g0);
+      Assert.assertEquals(g0, fc.getFileStatus(f).getGroup());
+
+      if (groups.size() > 1) {
+        String g1 = groups.get(1);
+        fc.setOwner(f, null, g1);
+        Assert.assertEquals(g1, fc.getFileStatus(f).getGroup());
+      } else {
+        System.out.println("Not testing changing the group since user " +
+                           "belongs to only one group.");
+      }
+    } 
+    finally {cleanupFile(fc, f);}
+  }
+
+  static List<String> getGroups() throws IOException {
+    List<String> a = new ArrayList<String>();
+    String s = Shell.execCommand(Shell.getGROUPS_COMMAND());
+    for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
+      a.add(t.nextToken());
+    }
+    return a;
+  }
+  
+  
+  void doFilePermissionCheck(FsPermission expectedPerm, FsPermission actualPerm) {
+  Assert.assertEquals(expectedPerm.applyUMask(getFileMask()), actualPerm);
+  }
+  
+  
+  /*
+   * Some filesystems like HDFS ignore the "x" bit of the permission.
+   * Others like localFs do not.
+   * Override the method below if the file system being tested masks out
+   * certain bits for file masks.
+   */
+  static final FsPermission FILE_MASK_ZERO = new FsPermission((short) 0);
+  FsPermission getFileMask() {
+    return FILE_MASK_ZERO;
+  }
+}

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFcLocalFsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFcLocalFsPermission.java?rev=831475&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFcLocalFsPermission.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFcLocalFsPermission.java Fri
Oct 30 22:24:22 2009
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * Test permissions for localFs using FileContext API.
+ */
+public class TestFcLocalFsPermission extends 
+  FileContextPermissionBase {
+
+  @Before
+  public void setUp() throws Exception {
+    fc = FileContext.getLocalFSFileContext();
+    super.setUp();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+}

Modified: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java?rev=831475&r1=831474&r2=831475&view=diff
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
(original)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
Fri Oct 30 22:24:22 2009
@@ -70,8 +70,8 @@
   }
   
   private void checkDeleteOnExitData(int size, FileContext fc, Path... paths) {
-    Assert.assertEquals(size, FileContext.deleteOnExit.size());
-    Set<Path> set = FileContext.deleteOnExit.get(fc);
+    Assert.assertEquals(size, FileContext.DELETE_ON_EXIT.size());
+    Set<Path> set = FileContext.DELETE_ON_EXIT.get(fc);
     Assert.assertEquals(paths.length, (set == null ? 0 : set.size()));
     for (Path path : paths) {
       Assert.assertTrue(set.contains(path));
@@ -87,7 +87,7 @@
     checkDeleteOnExitData(1, fc, file1);
     
     // Ensure shutdown hook is added
-    Assert.assertTrue(Runtime.getRuntime().removeShutdownHook(FileContext.finalizer));
+    Assert.assertTrue(Runtime.getRuntime().removeShutdownHook(FileContext.FINALIZER));
     
     Path file2 = getTestPath("dir1/file2");
     createFile(fc, file2);
@@ -101,8 +101,8 @@
     
     // trigger deleteOnExit and ensure the registered
     // paths are cleaned up
-    FileContext.finalizer.start();
-    FileContext.finalizer.join();
+    FileContext.FINALIZER.start();
+    FileContext.FINALIZER.join();
     checkDeleteOnExitData(0, fc, new Path[0]);
     Assert.assertFalse(fc.exists(file1));
     Assert.assertFalse(fc.exists(file2));



Mime
View raw message