hadoop-hdfs-commits mailing list archives

From: ji...@apache.org
Subject: svn commit: r1574171 - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/org/apache/hadoop/hdfs/server/n...
Date: Tue, 04 Mar 2014 18:22:27 GMT
Author: jing9
Date: Tue Mar  4 18:22:26 2014
New Revision: 1574171

URL: http://svn.apache.org/r1574171
Log:
Merging r1573814 through r1574170 from trunk.

Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1573814-1574170

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Mar  4 18:22:26 2014
@@ -373,6 +373,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5950. The DFSClient and DataNode should use shared memory segments to
     communicate short-circuit information. (cmccabe)
 
+    HDFS-6046. add dfs.client.mmap.enabled (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -520,6 +522,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5866. '-maxSize' and '-step' option fail in OfflineImageViewer.
     (Akira Ajisaka via wheat9)
 
+    HDFS-6040. fix DFSClient issue without libhadoop.so and some other
+    ShortCircuitShm cleanups (cmccabe)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
@@ -666,6 +671,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6028. Print clearer error message when user attempts to delete required
     mask entry from ACL. (cnauroth)
 
+    HDFS-6039. Uploading a File under a Dir with default acls throws "Duplicated
+    ACLFeature". (cnauroth)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Tue Mar  4 18:22:26 2014
@@ -165,13 +165,13 @@
        <Bug pattern="DM_STRING_CTOR" />
      </Match>
     <Match>
-      <Class name="org.apache.hadoop.hdfs.client.ClientMmapManager" />
-      <Method name="create" />
+      <Class name="org.apache.hadoop.hdfs.client.DfsClientShmManager$EndpointShmManager" />
+      <Method name="allocSlot" />
       <Bug pattern="UL_UNRELEASED_LOCK_EXCEPTION_PATH" />
     </Match>
     <Match>
-      <Class name="org.apache.hadoop.hdfs.client.ClientMmapManager" />
-      <Method name="create" />
+      <Class name="org.apache.hadoop.hdfs.client.DfsClientShmManager$EndpointShmManager" />
+      <Method name="allocSlot" />
       <Bug pattern="UL_UNRELEASED_LOCK" />
     </Match>
     <!-- Manually verified to be okay, we want to throw away the top bit here -->

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1573814-1574170

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Mar  4 18:22:26 2014
@@ -284,6 +284,7 @@ public class DFSClient implements java.i
     final long shortCircuitStreamsCacheExpiryMs; 
     final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
     
+    final boolean shortCircuitMmapEnabled;
     final int shortCircuitMmapCacheSize;
     final long shortCircuitMmapCacheExpiryMs;
     final long shortCircuitMmapCacheRetryTimeout;
@@ -403,6 +404,9 @@ public class DFSClient implements java.i
       shortCircuitStreamsCacheExpiryMs = conf.getLong(
           DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
           DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT);
+      shortCircuitMmapEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED,
+          DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED_DEFAULT);
       shortCircuitMmapCacheSize = conf.getInt(
           DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE,
           DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT);

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Mar  4 18:22:26 2014
@@ -437,6 +437,8 @@ public class DFSConfigKeys extends Commo
   public static final int DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT = 1024 * 1024;
   public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic";
   public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
+  public static final String DFS_CLIENT_MMAP_ENABLED= "dfs.client.mmap.enabled";
+  public static final boolean DFS_CLIENT_MMAP_ENABLED_DEFAULT = true;
   public static final String DFS_CLIENT_MMAP_CACHE_SIZE = "dfs.client.mmap.cache.size";
   public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 256;
   public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms";

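As context for the new key pair above: a minimal sketch of toggling dfs.client.mmap.enabled programmatically. The DFS_CLIENT_MMAP_ENABLED constant and its default come from the diff; the surrounding class and setup are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class MmapToggleExample {
      public static void main(String[] args) throws Exception {
        // Disable memory-mapped zero-copy reads for this client only.
        // DFS_CLIENT_MMAP_ENABLED defaults to true per the change above.
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED, false);
        FileSystem fs = FileSystem.get(conf);
        // Reads through this FileSystem now skip the mmap fast path.
        fs.close();
      }
    }
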
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Tue Mar  4 18:22:26 2014
@@ -1571,7 +1571,10 @@ implements ByteBufferReadable, CanSetDro
             "at position " + pos);
       }
     }
-    ByteBuffer buffer = tryReadZeroCopy(maxLength, opts);
+    ByteBuffer buffer = null;
+    if (dfsClient.getConf().shortCircuitMmapEnabled) {
+      buffer = tryReadZeroCopy(maxLength, opts);
+    }
     if (buffer != null) {
       return buffer;
     }

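The hunk above makes DFSInputStream attempt tryReadZeroCopy only when mmaps are enabled; otherwise the zero-copy request falls through to the copying fallback, or to UnsupportedOperationException when the caller supplies no buffer pool (the behavior the new test relies on). A hedged caller-side sketch, assuming a pre-existing file at a hypothetical path; the FSDataInputStream, ReadOption, and ElasticByteBufferPool APIs are real:

    import java.nio.ByteBuffer;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.ReadOption;
    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class ZeroCopyReadExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataInputStream in = fs.open(new Path("/example/file")); // hypothetical path
        try {
          // Supplying a ByteBufferPool lets the read degrade to a copying
          // read when mmap is disabled; passing null instead would throw
          // UnsupportedOperationException, as testClientMmapDisable expects.
          ByteBuffer buf = in.read(new ElasticByteBufferPool(), 4096,
              EnumSet.of(ReadOption.SKIP_CHECKSUMS));
          if (buf != null) {
            try {
              // ... consume the mapped or copied bytes ...
            } finally {
              in.releaseBuffer(buf);
            }
          }
        } finally {
          in.close();
          fs.close();
        }
      }
    }
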
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java Tue Mar  4 18:22:26 2014
@@ -30,7 +30,6 @@ import org.apache.commons.lang.builder.H
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.InvalidRequestException;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
 import org.apache.hadoop.util.Shell;
@@ -514,7 +513,9 @@ public class ShortCircuitShm {
    * @return          The base address of the slot.
    */
   private final long calculateSlotAddress(int slotIdx) {
-    return this.baseAddress + (slotIdx * BYTES_PER_SLOT);
+    long offset = slotIdx;
+    offset *= BYTES_PER_SLOT;
+    return this.baseAddress + offset;
   }
 
   /**
@@ -536,7 +537,6 @@ public class ShortCircuitShm {
     slot.makeValid();
     slots[idx] = slot;
     if (LOG.isTraceEnabled()) {
-      //LOG.trace(this + ": allocAndRegisterSlot " + idx);
       LOG.trace(this + ": allocAndRegisterSlot " + idx + ": allocatedSlots=" + allocatedSlots +
                   StringUtils.getStackTrace(Thread.currentThread()));
     }
@@ -567,6 +567,14 @@ public class ShortCircuitShm {
    */
   synchronized public final Slot registerSlot(int slotIdx,
       ExtendedBlockId blockId) throws InvalidRequestException {
+    if (slotIdx < 0) {
+      throw new InvalidRequestException(this + ": invalid negative slot " +
+          "index " + slotIdx);
+    }
+    if (slotIdx >= slots.length) {
+      throw new InvalidRequestException(this + ": invalid slot " +
+          "index " + slotIdx);
+    }
     if (allocatedSlots.get(slotIdx)) {
       throw new InvalidRequestException(this + ": slot " + slotIdx +
           " is already in use.");
@@ -579,7 +587,6 @@ public class ShortCircuitShm {
     slots[slotIdx] = slot;
     allocatedSlots.set(slotIdx, true);
     if (LOG.isTraceEnabled()) {
-      //LOG.trace(this + ": registerSlot " + slotIdx);
       LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
                   StringUtils.getStackTrace(Thread.currentThread()));
     }

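Two of the hunks above are straightforward (slot-index bounds checks and removal of commented-out trace lines); the calculateSlotAddress change is an int-overflow fix: slotIdx * BYTES_PER_SLOT was previously computed in 32-bit arithmetic before being added to the long base address. A standalone illustration, with values chosen purely for demonstration:

    public class SlotOffsetOverflowDemo {
      public static void main(String[] args) {
        final int BYTES_PER_SLOT = 64;        // illustrative slot size
        int slotIdx = 40000000;               // large enough that the int product wraps
        long baseAddress = 0x7f0000000000L;   // illustrative mapped base address

        long broken = baseAddress + (slotIdx * BYTES_PER_SLOT); // 32-bit product overflows
        long offset = slotIdx;                // widen to long first, as the fix does
        offset *= BYTES_PER_SLOT;
        long fixed = baseAddress + offset;

        System.out.println(broken == fixed);  // prints false
      }
    }
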
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java Tue Mar  4 18:22:26 2014
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.DomainSocketWatcher;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -375,7 +376,8 @@ public class ShortCircuitCache implement
     this.mmapRetryTimeoutMs = mmapRetryTimeoutMs;
     this.staleThresholdMs = staleThresholdMs;
     DfsClientShmManager shmManager = null;
-    if (shmInterruptCheckMs > 0) {
+    if ((shmInterruptCheckMs > 0) &&
+        (DomainSocketWatcher.getLoadingFailureReason() == null)) {
       try {
         shmManager = new DfsClientShmManager(shmInterruptCheckMs);
       } catch (IOException e) {

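The added condition is the libhadoop.so guard from HDFS-6040: DomainSocketWatcher.getLoadingFailureReason() returns null only when native support loaded cleanly, so the cache now skips creating a DfsClientShmManager rather than failing later. A sketch of the same guard pattern; the watcher call is the one used in the diff, while the helper class around it is illustrative:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.net.unix.DomainSocketWatcher;

    public class NativeShmGuard {
      private static final Log LOG = LogFactory.getLog(NativeShmGuard.class);

      /** True iff shared-memory short-circuit machinery can be constructed. */
      public static boolean sharedMemoryUsable(int shmInterruptCheckMs) {
        String reason = DomainSocketWatcher.getLoadingFailureReason();
        if (reason != null) {
          LOG.debug("falling back to non-shared-memory short-circuit reads: "
              + reason);
          return false;
        }
        return shmInterruptCheckMs > 0;
      }
    }
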
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Mar  4 18:22:26 2014
@@ -2246,6 +2246,7 @@ public class FSDirectory implements Clos
     final Quota.Counts counts = child.computeQuotaUsage();
     updateCount(iip, pos,
         counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota);
+    boolean isRename = (child.getParent() != null);
     final INodeDirectory parent = inodes[pos-1].asDirectory();
     boolean added = false;
     try {
@@ -2260,7 +2261,9 @@ public class FSDirectory implements Clos
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
     } else {
       iip.setINode(pos - 1, child.getParent());
-      AclStorage.copyINodeDefaultAcl(child);
+      if (!isRename) {
+        AclStorage.copyINodeDefaultAcl(child);
+      }
       addToInodeMap(child);
     }
     return added;

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Tue Mar  4 18:22:26 2014
@@ -1533,25 +1533,33 @@
 </property>
 
 <property>
+  <name>dfs.client.mmap.enabled</name>
+  <value>true</value>
+  <description>
+    If this is set to false, the client won't attempt to perform memory-mapped reads.
+  </description>
+</property>
+
+<property>
   <name>dfs.client.mmap.cache.size</name>
-  <value>1024</value>
+  <value>256</value>
   <description>
     When zero-copy reads are used, the DFSClient keeps a cache of recently used
     memory mapped regions.  This parameter controls the maximum number of
     entries that we will keep in that cache.
 
-    If this is set to 0, we will not allow mmap.
-
     The larger this number is, the more file descriptors we will potentially
     use for memory-mapped files.  mmaped files also use virtual address space.
     You may need to increase your ulimit virtual address space limits before
     increasing the client mmap cache size.
+
+    Note that you can still do zero-copy reads when this size is set to 0.
   </description>
 </property>
 
 <property>
   <name>dfs.client.mmap.cache.timeout.ms</name>
-  <value>900000</value>
+  <value>3600000</value>
   <description>
     The minimum length of time that we will keep an mmap entry in the cache
     between uses.  If an entry is in the cache longer than this, and nobody
@@ -1570,7 +1578,7 @@
 
 <property>
   <name>dfs.client.short.circuit.replica.stale.threshold.ms</name>
-  <value>3000000</value>
+  <value>1800000</value>
   <description>
     The maximum amount of time that we will consider a short-circuit replica to
     be valid, if there is no communication from the DataNode.  After this time

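Besides adding dfs.client.mmap.enabled, the hunks above also adjust the shipped defaults (mmap cache size 1024 to 256 entries, mmap cache timeout 900000 to 3600000 ms, short-circuit replica stale threshold 3000000 to 1800000 ms). For a site that wants mmap reads off entirely, a minimal hdfs-site.xml override might look like the following; only the property name and value semantics are taken from the diff:

    <!-- Illustrative hdfs-site.xml override: turn off memory-mapped reads
         client-wide. Property name and semantics come from the
         hdfs-default.xml entry added above. -->
    <property>
      <name>dfs.client.mmap.enabled</name>
      <value>false</value>
    </property>
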
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java Tue Mar  4 18:22:26 2014
@@ -21,6 +21,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -698,4 +700,63 @@ public class TestEnhancedByteBufferAcces
       }
     }, 10, 60000);
   }
+  
+  @Test
+  public void testClientMmapDisable() throws Exception {
+    HdfsConfiguration conf = initZeroCopyTest();
+    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
+    MiniDFSCluster cluster = null;
+    final Path TEST_PATH = new Path("/a");
+    final int TEST_FILE_LENGTH = 16385;
+    final int RANDOM_SEED = 23453;
+    final String CONTEXT = "testClientMmapDisable";
+    FSDataInputStream fsIn = null;
+    DistributedFileSystem fs = null;
+    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+
+    try {
+      // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
+      // mapped reads.
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, TEST_PATH,
+          TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
+      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
+      fsIn = fs.open(TEST_PATH);
+      try {
+        fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+        Assert.fail("expected zero-copy read to fail when client mmaps " +
+            "were disabled.");
+      } catch (UnsupportedOperationException e) {
+      }
+    } finally {
+      if (fsIn != null) fsIn.close();
+      if (fs != null) fs.close();
+      if (cluster != null) cluster.shutdown();
+    }
+
+    fsIn = null;
+    fs = null;
+    cluster = null;
+    try {
+      // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
+      conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
+      conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
+      conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, TEST_PATH,
+          TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
+      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
+      fsIn = fs.open(TEST_PATH);
+      ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+      fsIn.releaseBuffer(buf);
+    } finally {
+      if (fsIn != null) fsIn.close();
+      if (fs != null) fs.close();
+      if (cluster != null) cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java Tue Mar  4 18:22:26 2014
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.ShortCircuitShm;
+import org.apache.hadoop.hdfs.ShortCircuitShm.ShmId;
+import org.apache.hadoop.hdfs.ShortCircuitShm.Slot;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class TestShortCircuitShm {
+  public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+  
+  private static final File TEST_BASE =
+      new File(System.getProperty("test.build.data", "/tmp"));
+
+  @Before
+  public void before() {
+    Assume.assumeTrue(NativeIO.isAvailable());
+    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+  }
+
+  @Test(timeout=60000)
+  public void testStartupShutdown() throws Exception {
+    File path = new File(TEST_BASE, "testStartupShutdown");
+    path.mkdirs();
+    SharedFileDescriptorFactory factory =
+        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+    FileInputStream stream =
+        factory.createDescriptor("testStartupShutdown", 4096);
+    ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
+    shm.free();
+    stream.close();
+    FileUtil.fullyDelete(path);
+  }
+
+  @Test(timeout=60000)
+  public void testAllocateSlots() throws Exception {
+    File path = new File(TEST_BASE, "testAllocateSlots");
+    path.mkdirs();
+    SharedFileDescriptorFactory factory =
+        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+    FileInputStream stream =
+        factory.createDescriptor("testAllocateSlots", 4096);
+    ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
+    int numSlots = 0;
+    ArrayList<Slot> slots = new ArrayList<Slot>();
+    while (!shm.isFull()) {
+      Slot slot = shm.allocAndRegisterSlot(new ExtendedBlockId(123L, "test_bp1"));
+      slots.add(slot);
+      numSlots++;
+    }
+    LOG.info("allocated " + numSlots + " slots before running out.");
+    int slotIdx = 0;
+    for (Iterator<Slot> iter = shm.slotIterator();
+        iter.hasNext(); ) {
+      Assert.assertTrue(slots.contains(iter.next()));
+    }
+    for (Slot slot : slots) {
+      Assert.assertFalse(slot.addAnchor());
+      Assert.assertEquals(slotIdx++, slot.getSlotIdx());
+    }
+    for (Slot slot : slots) {
+      slot.makeAnchorable();
+    }
+    for (Slot slot : slots) {
+      Assert.assertTrue(slot.addAnchor());
+    }
+    for (Slot slot : slots) {
+      slot.removeAnchor();
+    }
+    for (Slot slot : slots) {
+      shm.unregisterSlot(slot.getSlotIdx());
+      slot.makeInvalid();
+    }
+    shm.free();
+    stream.close();
+    FileUtil.fullyDelete(path);
+  }
+}

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java Tue Mar  4 18:22:26 2014
@@ -1064,6 +1064,45 @@ public abstract class FSAclBaseTest {
   }
 
   @Test
+  public void testDefaultAclRenamedFile() throws Exception {
+    Path dirPath = new Path(path, "dir");
+    FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short)0750));
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", ALL));
+    fs.setAcl(dirPath, aclSpec);
+    Path filePath = new Path(path, "file1");
+    fs.create(filePath).close();
+    fs.setPermission(filePath, FsPermission.createImmutable((short)0640));
+    Path renamedFilePath = new Path(dirPath, "file1");
+    fs.rename(filePath, renamedFilePath);
+    AclEntry[] expected = new AclEntry[] { };
+    AclStatus s = fs.getAclStatus(renamedFilePath);
+    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
+    assertArrayEquals(expected, returned);
+    assertPermission(renamedFilePath, (short)0640);
+    assertAclFeature(renamedFilePath, false);
+  }
+
+  @Test
+  public void testDefaultAclRenamedDir() throws Exception {
+    Path dirPath = new Path(path, "dir");
+    FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short)0750));
+    List<AclEntry> aclSpec = Lists.newArrayList(
+      aclEntry(DEFAULT, USER, "foo", ALL));
+    fs.setAcl(dirPath, aclSpec);
+    Path subdirPath = new Path(path, "subdir");
+    FileSystem.mkdirs(fs, subdirPath, FsPermission.createImmutable((short)0750));
+    Path renamedSubdirPath = new Path(dirPath, "subdir");
+    fs.rename(subdirPath, renamedSubdirPath);
+    AclEntry[] expected = new AclEntry[] { };
+    AclStatus s = fs.getAclStatus(renamedSubdirPath);
+    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
+    assertArrayEquals(expected, returned);
+    assertPermission(renamedSubdirPath, (short)0750);
+    assertAclFeature(renamedSubdirPath, false);
+  }
+
+  @Test
   public void testSkipAclEnforcementPermsDisabled() throws Exception {
     Path bruceDir = new Path(path, "bruce");
     Path bruceFile = new Path(bruceDir, "file");

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml?rev=1574171&r1=1574170&r2=1574171&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml Tue Mar  4 18:22:26 2014
@@ -972,5 +972,55 @@
         </comparator>
       </comparators>
     </test>
+    <test>
+      <description>copyFromLocal: copying file into a directory with a default ACL</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -setfacl -m default:user:charlie:rwx /dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir1/data15bytes</command>
+        <command>-fs NAMENODE -getfacl /dir1/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^# file: /dir1/data15bytes$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^# owner: USERNAME$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^# group: supergroup$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^user::rw-$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^user:charlie:rwx\t#effective:r--$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^group::r-x\t#effective:r--$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^mask::r--$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^other::r--$</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>.*(?!default).*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>


