hadoop-hdfs-commits mailing list archives

From: vinayakum...@apache.org
Subject: svn commit: r1601151 [2/2] - in /hadoop/common/branches/HDFS-5442/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-nfs/src/main/ja...
Date: Sat, 07 Jun 2014 16:29:17 GMT
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Sat Jun  7 16:29:10 2014
@@ -104,6 +104,7 @@ class BlockReceiver implements Closeable
   private boolean dropCacheBehindWrites;
   private long lastCacheManagementOffset = 0;
   private boolean syncBehindWrites;
+  private boolean syncBehindWritesInBackground;
 
   /** The client name.  It is empty if a datanode is the client */
   private final String clientname;
@@ -207,6 +208,8 @@ class BlockReceiver implements Closeable
         datanode.getDnConf().dropCacheBehindWrites :
           cachingStrategy.getDropBehind();
       this.syncBehindWrites = datanode.getDnConf().syncBehindWrites;
+      this.syncBehindWritesInBackground = datanode.getDnConf().
+          syncBehindWritesInBackground;
       
       final boolean isCreate = isDatanode || isTransfer 
           || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
@@ -668,10 +671,17 @@ class BlockReceiver implements Closeable
         // of file                 
         //
         if (syncBehindWrites) {
-          NativeIO.POSIX.syncFileRangeIfPossible(outFd,
-              lastCacheManagementOffset,
-              offsetInBlock - lastCacheManagementOffset,
-              NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          if (syncBehindWritesInBackground) {
+            this.datanode.getFSDataset().submitBackgroundSyncFileRangeRequest(
+                block, outFd, lastCacheManagementOffset,
+                offsetInBlock - lastCacheManagementOffset,
+                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          } else {
+            NativeIO.POSIX.syncFileRangeIfPossible(outFd,
+                lastCacheManagementOffset, offsetInBlock
+                    - lastCacheManagementOffset,
+                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          }
         }
         //
         // For POSIX_FADV_DONTNEED, we want to drop from the beginning 
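
For context: the new flag does not change what gets synced, only where the call runs. When syncBehindWritesInBackground is set, the same sync_file_range request is handed to the dataset's async disk service, so the packet-processing thread pays only the cost of enqueueing. A minimal sketch of that dispatch pattern, using plain java.util.concurrent rather than the actual Hadoop classes:

  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;

  class BackgroundSyncSketch {
    // Stands in for the per-volume pools kept by FsDatasetAsyncDiskService.
    private final ExecutorService pool = Executors.newSingleThreadExecutor();

    /** Inline path: the writer blocks until the kernel call returns. */
    void syncInline(long offset, long nbytes) {
      syncFileRange(offset, nbytes);
    }

    /** Background path: the writer returns as soon as the task is queued. */
    void syncInBackground(final long offset, final long nbytes) {
      pool.execute(new Runnable() {
        @Override
        public void run() {
          syncFileRange(offset, nbytes);
        }
      });
    }

    // Placeholder for NativeIO.POSIX.syncFileRangeIfPossible(fd, ...).
    private void syncFileRange(long offset, long nbytes) {
      System.out.println("sync_file_range(" + offset + ", " + nbytes + ")");
    }
  }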

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java Sat Jun  7 16:29:10 2014
@@ -67,6 +67,7 @@ public class DNConf {
   final boolean transferToAllowed;
   final boolean dropCacheBehindWrites;
   final boolean syncBehindWrites;
+  final boolean syncBehindWritesInBackground;
   final boolean dropCacheBehindReads;
   final boolean syncOnClose;
   final boolean encryptDataTransfer;
@@ -119,6 +120,9 @@ public class DNConf {
     syncBehindWrites = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
         DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
+    syncBehindWritesInBackground = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
+        DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
     dropCacheBehindReads = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
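
Assuming the new DFSConfigKeys constants follow the existing naming pattern (the sibling key is dfs.datanode.sync.behind.writes, so the new one would be dfs.datanode.sync.behind.writes.in.background), an operator would enable the behavior in hdfs-site.xml along these lines:

  <property>
    <name>dfs.datanode.sync.behind.writes</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.sync.behind.writes.in.background</name>
    <value>true</value>
  </property>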

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sat Jun  7 16:29:10 2014
@@ -231,6 +231,9 @@ public class DataNode extends Configured
   private boolean checkDiskErrorFlag = false;
   private Object checkDiskErrorMutex = new Object();
   private long lastDiskErrorCheck;
+  private String supergroup;
+  private boolean isPermissionEnabled;
+  private String dnUserName = null;
 
   /**
    * Create the DataNode given a configuration, an array of dataDirs,
@@ -252,6 +255,11 @@ public class DataNode extends Configured
     this.getHdfsBlockLocationsEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
         DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+    this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
+        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+    this.isPermissionEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
+        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
 
     confVersion = "core-" +
         conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
@@ -432,6 +440,33 @@ public class DataNode extends Configured
       ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
     }
   }
+
+  /** Check whether the current user is in the superuser group. */
+  private void checkSuperuserPrivilege() throws IOException, AccessControlException {
+    if (!isPermissionEnabled) {
+      return;
+    }
+    // Try to get the ugi in the RPC call.
+    UserGroupInformation callerUgi = ipcServer.getRemoteUser();
+    if (callerUgi == null) {
+      // This is not from RPC.
+      callerUgi = UserGroupInformation.getCurrentUser();
+    }
+
+    // Is this by the DN user itself?
+    assert dnUserName != null;
+    if (callerUgi.getShortUserName().equals(dnUserName)) {
+      return;
+    }
+
+    // Is the user a member of the super group?
+    List<String> groups = Arrays.asList(callerUgi.getGroupNames());
+    if (groups.contains(supergroup)) {
+      return;
+    }
+    // Not a superuser.
+    throw new AccessControlException();
+  }
   
 /**
  * Initialize the datanode's periodic scanners:
@@ -735,6 +770,11 @@ public class DataNode extends Configured
   
     // BlockPoolTokenSecretManager is required to create ipc server.
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
+
+    // Login is done by now. Set the DN user name.
+    dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
+    LOG.info("dnUserName = " + dnUserName);
+    LOG.info("supergroup = " + supergroup);
     initIpcServer(conf);
 
     metrics = DataNodeMetrics.create(conf, getDisplayName());
@@ -2414,6 +2454,7 @@ public class DataNode extends Configured
 
   @Override // ClientDatanodeProtocol
   public void refreshNamenodes() throws IOException {
+    checkSuperuserPrivilege();
     conf = new Configuration();
     refreshNamenodes(conf);
   }
@@ -2421,6 +2462,7 @@ public class DataNode extends Configured
   @Override // ClientDatanodeProtocol
   public void deleteBlockPool(String blockPoolId, boolean force)
       throws IOException {
+    checkSuperuserPrivilege();
     LOG.info("deleteBlockPool command received for block pool " + blockPoolId
         + ", force=" + force);
     if (blockPoolManager.get(blockPoolId) != null) {
@@ -2436,6 +2478,7 @@ public class DataNode extends Configured
 
   @Override // ClientDatanodeProtocol
   public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
+    checkSuperuserPrivilege();
     LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
         "). Shutting down Datanode...");
 
@@ -2602,4 +2645,4 @@ public class DataNode extends Configured
       return lastDiskErrorCheck;
     }
   }
-}
\ No newline at end of file
+}
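
The new check falls through three gates: permission checking disabled, caller is the datanode's own login user, or caller belongs to the configured supergroup; everything else is rejected. A self-contained sketch of that logic, with plain strings standing in for UserGroupInformation:

  import java.util.Arrays;

  class SuperuserCheckSketch {
    private final String dnUserName;
    private final String supergroup;
    private final boolean isPermissionEnabled;

    SuperuserCheckSketch(String dnUserName, String supergroup,
        boolean isPermissionEnabled) {
      this.dnUserName = dnUserName;
      this.supergroup = supergroup;
      this.isPermissionEnabled = isPermissionEnabled;
    }

    /** Throws unless the caller may run admin commands on this datanode. */
    void checkSuperuserPrivilege(String caller, String[] groups) {
      if (!isPermissionEnabled) {
        return;                                 // gate 1: checks disabled
      }
      if (caller.equals(dnUserName)) {
        return;                                 // gate 2: the DN user itself
      }
      if (Arrays.asList(groups).contains(supergroup)) {
        return;                                 // gate 3: supergroup member
      }
      throw new SecurityException("Access denied for user " + caller);
    }
  }

With this gate in place, the refreshNamenodes, deleteBlockPool and shutdownDatanode RPCs in the hunks above are no longer callable by arbitrary users.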

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java Sat Jun  7 16:29:10 2014
@@ -45,11 +45,19 @@ public class AvailableSpaceVolumeChoosin
   
   private static final Log LOG = LogFactory.getLog(AvailableSpaceVolumeChoosingPolicy.class);
   
-  private static final Random RAND = new Random();
+  private final Random random;
   
   private long balancedSpaceThreshold = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT;
   private float balancedPreferencePercent = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT;
 
+  AvailableSpaceVolumeChoosingPolicy(Random random) {
+    this.random = random;
+  }
+
+  public AvailableSpaceVolumeChoosingPolicy() {
+    this(new Random());
+  }
+
   @Override
   public synchronized void setConf(Configuration conf) {
     balancedSpaceThreshold = conf.getLong(
@@ -128,7 +136,7 @@ public class AvailableSpaceVolumeChoosin
           (highAvailableVolumes.size() * balancedPreferencePercent) /
           preferencePercentScaler;
       if (mostAvailableAmongLowVolumes < replicaSize ||
-          RAND.nextFloat() < scaledPreferencePercent) {
+          random.nextFloat() < scaledPreferencePercent) {
         volume = roundRobinPolicyHighAvailable.chooseVolume(
             highAvailableVolumes,
             replicaSize);
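
The point of replacing the static RAND with an injected Random is determinism in tests: production code keeps the no-arg constructor, while a test passes a seeded instance so every probabilistic volume choice is reproducible. The same pattern in miniature:

  import java.util.Random;

  class BiasedChooserSketch {
    private final Random random;

    BiasedChooserSketch(Random random) { this.random = random; } // tests
    BiasedChooserSketch() { this(new Random()); }                // production

    /** True when the better-provisioned volume group should be picked. */
    boolean preferHighFreeSpace(float scaledPreferencePercent) {
      return random.nextFloat() < scaledPreferencePercent;
    }
  }

  // In a test, new BiasedChooserSketch(new Random(123L)) produces the same
  // sequence of decisions on every run (see the test change further below).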

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java Sat Jun  7 16:29:10 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.da
 
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
@@ -431,5 +432,12 @@ public interface FsDatasetSpi<V extends 
    * @return true when trash is enabled
    */
   public boolean trashEnabled(String bpid);
+
+  /**
+   * submit a sync_file_range request to AsyncDiskService
+   */
+  public void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
+      final FileDescriptor fd, final long offset, final long nbytes,
+      final int flags);
 }
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java Sat Jun  7 16:29:10 2014
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -31,6 +32,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 
 /**
  * This class is a container of multiple thread pools, each for a volume,
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.server.pro
  * can be slow, and we don't want to use a single thread pool because that
  * is inefficient when we have more than 1 volume.  AsyncDiskService is the
  * solution for these.
+ * Another example of async disk operation is requesting sync_file_range().
  * 
  * This class and {@link org.apache.hadoop.util.AsyncDiskService} are similar.
  * They should be combined.
@@ -148,6 +152,21 @@ class FsDatasetAsyncDiskService {
     }
   }
 
+  public void submitSyncFileRangeRequest(FsVolumeImpl volume,
+      final FileDescriptor fd, final long offset, final long nbytes,
+      final int flags) {
+    execute(volume.getCurrentDir(), new Runnable() {
+      @Override
+      public void run() {
+        try {
+          NativeIO.POSIX.syncFileRangeIfPossible(fd, offset, nbytes, flags);
+        } catch (NativeIOException e) {
+          LOG.warn("sync_file_range error", e);
+        }
+      }
+    });
+  }
+
   /**
    * Delete the block file and meta file from the disk asynchronously, adjust
    * dfsUsed statistics accordingly.
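
execute(volume.getCurrentDir(), ...) routes the request to the thread pool owned by that volume, so background syncs on one disk never queue behind work for another. A simplified sketch of that per-volume dispatch, assuming a single worker thread per volume (the real service sizes its pools differently):

  import java.io.File;
  import java.util.HashMap;
  import java.util.Map;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;

  class PerVolumeExecutorSketch {
    private final Map<File, ExecutorService> executors =
        new HashMap<File, ExecutorService>();

    synchronized void addVolume(File volumeRoot) {
      executors.put(volumeRoot, Executors.newSingleThreadExecutor());
    }

    /** Run the task on the pool that owns the given volume. */
    synchronized void execute(File volumeRoot, Runnable task) {
      ExecutorService pool = executors.get(volumeRoot);
      if (pool == null) {
        throw new RuntimeException("Cannot find volume " + volumeRoot);
      }
      pool.execute(task);
    }
  }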

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Sat Jun  7 16:29:10 2014
@@ -1907,5 +1907,13 @@ class FsDatasetImpl implements FsDataset
     }
     return new RollingLogsImpl(dir, prefix);
   }
+
+  @Override
+  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
+      FileDescriptor fd, long offset, long nbytes, int flags) {
+    FsVolumeImpl fsVolumeImpl = this.getVolume(block);
+    asyncDiskService.submitSyncFileRangeRequest(fsVolumeImpl, fd, offset,
+        nbytes, flags);
+  }
 }
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Jun  7 16:29:10 2014
@@ -145,6 +145,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1632,9 +1633,11 @@ public class FSNamesystem implements Nam
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
       
+      // lastBlock is not part of getLocatedBlocks(), might need to sort it too
       LocatedBlock lastBlock = blocks.getLastLocatedBlock();
       if (lastBlock != null) {
-        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+        ArrayList<LocatedBlock> lastBlockList =
+            Lists.newArrayListWithCapacity(1);
         lastBlockList.add(lastBlock);
         blockManager.getDatanodeManager().sortLocatedBlocks(
                               clientMachine, lastBlockList);
@@ -7950,6 +7953,29 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
+
+  List<XAttr> listXAttrs(String src) throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      if (isPermissionEnabled) {
+        /* To access xattr names, you need EXECUTE in the owning directory. */
+        checkParentAccess(pc, src, FsAction.EXECUTE);
+      }
+      final List<XAttr> all = dir.getXAttrs(src);
+      final List<XAttr> filteredAll = XAttrPermissionFilter.
+        filterXAttrsForApi(pc, all);
+      return filteredAll;
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "listXAttrs", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+  }
   
   void removeXAttr(String src, XAttr xAttr) throws IOException {
     nnConf.checkXAttrsConfigFlag();
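
listXAttrs follows the standard FSNamesystem read-path shape: validate the operation category, take the namesystem read lock, re-validate under the lock (an HA transition may have happened in between), perform the read, and audit-log only on an access failure. Schematically, with a plain ReentrantReadWriteLock standing in for the real namesystem lock:

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  class ReadOpSketch {
    private final ReentrantReadWriteLock fsLock =
        new ReentrantReadWriteLock(true);

    String readOperation() {
      checkOperation();          // fail fast before taking the lock
      fsLock.readLock().lock();
      try {
        checkOperation();        // re-check; HA state may have changed
        return doRead();
      } finally {
        fsLock.readLock().unlock();
      }
    }

    private void checkOperation() { /* reject if reads cannot be served */ }
    private String doRead() { return "xattr names"; }
  }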

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sat Jun  7 16:29:10 2014
@@ -1395,6 +1395,11 @@ class NameNodeRpcServer implements Namen
       throws IOException {
     return namesystem.getXAttrs(src, xAttrs);
   }
+
+  @Override
+  public List<XAttr> listXAttrs(String src) throws IOException {
+    return namesystem.listXAttrs(src);
+  }
   
   @Override
   public void removeXAttr(String src, XAttr xAttr) throws IOException {

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Sat Jun  7 16:29:10 2014
@@ -121,6 +121,7 @@ import org.apache.hadoop.security.token.
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */
@@ -712,12 +713,12 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
-          final XAttrNameParam xattrName,
+          final List<XAttrNameParam> xattrNames,
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrName, xattrEncoding);
+        renewer, bufferSize, xattrNames, xattrEncoding);
   }
 
   /** Handle HTTP GET request. */
@@ -744,13 +745,13 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
-          final XAttrNameParam xattrName,
+          final List<XAttrNameParam> xattrNames,
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrName, xattrEncoding);
+        renewer, bufferSize, xattrEncoding);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -758,7 +759,7 @@ public class NamenodeWebHdfsMethods {
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrName, xattrEncoding);
+              xattrNames, xattrEncoding);
         } finally {
           reset();
         }
@@ -777,7 +778,7 @@ public class NamenodeWebHdfsMethods {
       final LengthParam length,
       final RenewerParam renewer,
       final BufferSizeParam bufferSize,
-      final XAttrNameParam xattrName,
+      final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -853,19 +854,27 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(status);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
-    case GETXATTR: {
-      XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath,
-          XAttrHelper.buildXAttrAsList(xattrName.getXAttrName())));
-      final String js = JsonUtil.toJsonString(xAttr,
-          xattrEncoding.getEncoding());
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
     case GETXATTRS: {
-      List<XAttr> xAttrs = np.getXAttrs(fullpath, null);
+      List<String> names = null;
+      if (xattrNames != null) {
+        names = Lists.newArrayListWithCapacity(xattrNames.size());
+        for (XAttrNameParam xattrName : xattrNames) {
+          if (xattrName.getXAttrName() != null) {
+            names.add(xattrName.getXAttrName());
+          }
+        }
+      }
+      List<XAttr> xAttrs = np.getXAttrs(fullpath, (names != null && 
+          !names.isEmpty()) ? XAttrHelper.buildXAttrs(names) : null);
       final String js = JsonUtil.toJsonString(xAttrs,
           xattrEncoding.getEncoding());
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case LISTXATTRS: {
+      final List<XAttr> xAttrs = np.listXAttrs(fullpath);
+      final String js = JsonUtil.toJsonString(xAttrs);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
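
At the HTTP level this means a GETXATTRS request may now repeat the xattr.name query parameter (Jersey collects repeated parameters into the List<XAttrNameParam>), while the new LISTXATTRS op needs none. Illustrative requests, with an invented host and path:

  http://namenode:50070/webhdfs/v1/tmp/f?op=GETXATTRS&xattr.name=user.a1&xattr.name=user.a2&encoding=hex
  http://namenode:50070/webhdfs/v1/tmp/f?op=LISTXATTRS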

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Sat Jun  7 16:29:10 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.util.DataChecks
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 import java.io.ByteArrayInputStream;
@@ -665,21 +666,6 @@ public class JsonUtil {
     return aclStatusBuilder.build();
   }
   
-  public static String toJsonString(final XAttr xAttr, 
-      final XAttrCodec encoding) throws IOException {
-    if (xAttr == null) {
-      return "{}";
-    }
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", XAttrHelper.getPrefixName(xAttr));
-    m.put("value", xAttr.getValue() != null ? 
-        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
-    final Map<String, Map<String, Object>> finalMap =
-        new TreeMap<String, Map<String, Object>>();
-    finalMap.put(XAttr.class.getSimpleName(), m);
-    return JSON.toString(finalMap);
-  }
-  
   private static Map<String, Object> toJsonMap(final XAttr xAttr,
       final XAttrCodec encoding) throws IOException {
     if (xAttr == null) {
@@ -715,18 +701,30 @@ public class JsonUtil {
     return JSON.toString(finalMap);
   }
   
-  public static XAttr toXAttr(final Map<?, ?> json) throws IOException {
+  public static String toJsonString(final List<XAttr> xAttrs)
+    throws IOException {
+    final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
+    for (XAttr xAttr : xAttrs) {
+      names.add(XAttrHelper.getPrefixName(xAttr));
+    }
+    String ret = JSON.toString(names);
+    final Map<String, Object> finalMap = new TreeMap<String, Object>();
+    finalMap.put("XAttrNames", ret);
+    return JSON.toString(finalMap);
+  }
+
+  public static byte[] getXAttr(final Map<?, ?> json, final String name) 
+      throws IOException {
     if (json == null) {
       return null;
     }
     
-    Map<?, ?> m = (Map<?, ?>) json.get(XAttr.class.getSimpleName());
-    if (m == null) {
-      return null;
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs != null) {
+      return xAttrs.get(name);
     }
-    String name = (String) m.get("name");
-    String value = (String) m.get("value");
-    return XAttrHelper.buildXAttr(name, decodeXAttrValue(value));
+    
+    return null;
   }
   
   public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
@@ -738,28 +736,23 @@ public class JsonUtil {
     return toXAttrMap((Object[])json.get("XAttrs"));
   }
   
-  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json, 
-      List<String> names) throws IOException {
-    if (json == null || names == null) {
+  public static List<String> toXAttrNames(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
       return null;
     }
-    if (names.isEmpty()) {
-      return Maps.newHashMap();
-    }
-    Map<String, byte[]> xAttrs = toXAttrs(json);
-    if (xAttrs == null || xAttrs.isEmpty()) {
-      return xAttrs;
-    }
-    
-    Map<String, byte[]> result = Maps.newHashMap();
-    for (String name : names) {
-      if (xAttrs.containsKey(name)) {
-        result.put(name, xAttrs.get(name));
-      }
+
+    final String namesInJson = (String) json.get("XAttrNames");
+    final Object[] xattrs = (Object[]) JSON.parse(namesInJson);
+    final List<String> names =
+      Lists.newArrayListWithCapacity(json.keySet().size());
+
+    for (int i = 0; i < xattrs.length; i++) {
+        names.add((String) (xattrs[i]));
     }
-    return result;
+    return names;
   }
-  
+
   private static Map<String, byte[]> toXAttrMap(final Object[] objects) 
       throws IOException {
     if (objects == null) {
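
For reference, the two JSON shapes involved: toXAttrs/getXAttr parse a GETXATTRS body, while toXAttrNames parses a LISTXATTRS body, in which "XAttrNames" is itself a JSON-encoded string holding the array of prefixed names:

  {"XAttrs":[{"name":"user.a1","value":"0x313233"},{"name":"user.a2","value":"0x313131"}]}
  {"XAttrNames":"[\"user.a1\",\"user.a2\"]"}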

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Sat Jun  7 16:29:10 2014
@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -80,6 +79,7 @@ import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /** A FileSystem for HDFS over the web. */
@@ -601,6 +601,13 @@ public class WebHdfsFileSystem extends F
       this.parameters = parameters;
     }
     
+    AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
+        final Path fspath) {
+      super(op, false);
+      this.fspath = fspath;
+      this.parameters = parameters;
+    }
+    
     @Override
     protected URL getUrl() throws IOException {
       return toUrl(op, fspath, parameters);
@@ -630,6 +637,11 @@ public class WebHdfsFileSystem extends F
       super(op, fspath, parameters);
     }
     
+    FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
+        final Path fspath) {
+      super(op, parameters, fspath);
+    }
+    
     @Override
     final T getResponse(HttpURLConnection conn) throws IOException {
       try {
@@ -834,14 +846,13 @@ public class WebHdfsFileSystem extends F
   }
   
   @Override
-  public byte[] getXAttr(Path p, String name) throws IOException {
-    final HttpOpParam.Op op = GetOpParam.Op.GETXATTR;
+  public byte[] getXAttr(Path p, final String name) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name), 
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
-        XAttr xAttr = JsonUtil.toXAttr(json);
-        return xAttr != null ? xAttr.getValue() : null;
+        return JsonUtil.getXAttr(json, name);
       }
     }.run();
   }
@@ -861,17 +872,35 @@ public class WebHdfsFileSystem extends F
   @Override
   public Map<String, byte[]> getXAttrs(Path p, final List<String> names) 
       throws IOException {
+    Preconditions.checkArgument(names != null && !names.isEmpty(), 
+        "XAttr names cannot be null or empty.");
+    Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
+    for (int i = 0; i < parameters.length - 1; i++) {
+      parameters[i] = new XAttrNameParam(names.get(i));
+    }
+    parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
+    
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
-    return new FsPathResponseRunner<Map<String, byte[]>>(op, p, 
-        new XAttrEncodingParam(XAttrCodec.HEX)) {
+    return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.toXAttrs(json, names);
+        return JsonUtil.toXAttrs(json);
       }
     }.run();
   }
   
   @Override
+  public List<String> listXAttrs(Path p) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
+    return new FsPathResponseRunner<List<String>>(op, p) {
+      @Override
+      List<String> decodeResponse(Map<?, ?> json) throws IOException {
+        return JsonUtil.toXAttrNames(json);
+      }
+    }.run();
+  }
+
+  @Override
   public void removeXAttr(Path p, String name) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR;
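
Taken together, the client-facing xattr surface over WebHDFS now covers single values, selected values, and name listing. A hypothetical usage sketch (the webhdfs URI, path, and xattr names are invented for illustration):

  import java.util.Arrays;
  import java.util.List;
  import java.util.Map;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class XAttrClientSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = new Path("webhdfs://namenode:50070/")
          .getFileSystem(conf);
      Path file = new Path("/tmp/demo");

      byte[] one = fs.getXAttr(file, "user.a1");   // one value (GETXATTRS)
      Map<String, byte[]> some = fs.getXAttrs(     // selected values
          file, Arrays.asList("user.a1", "user.a2"));
      List<String> names = fs.listXAttrs(file);    // names only (LISTXATTRS)
      System.out.println(names + " (" + some.size() + " values fetched)");
    }
  }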

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Sat Jun  7 16:29:10 2014
@@ -36,8 +36,8 @@ public class GetOpParam extends HttpOpPa
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
-    GETXATTR(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
+    LISTXATTRS(false, HttpURLConnection.HTTP_OK),
 
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Sat Jun  7 16:29:10 2014
@@ -764,6 +764,8 @@ service ClientNamenodeProtocol {
       returns(SetXAttrResponseProto);
   rpc getXAttrs(GetXAttrsRequestProto)
       returns(GetXAttrsResponseProto);
+  rpc listXAttrs(ListXAttrsRequestProto)
+      returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
 }

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto Sat Jun  7 16:29:10 2014
@@ -62,6 +62,14 @@ message GetXAttrsResponseProto {
   repeated XAttrProto xAttrs = 1;
 }
 
+message ListXAttrsRequestProto {
+  required string src = 1;
+}
+
+message ListXAttrsResponseProto {
+  repeated XAttrProto xAttrs = 1;
+}
+
 message RemoveXAttrRequestProto {
   required string src        = 1;
   optional XAttrProto xAttr  = 2;

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sat Jun  7 16:29:10 2014
@@ -1296,8 +1296,56 @@
     non-zero integer.
   </description>
 </property>
+
+<property>
+  <name>nfs.server.port</name>
+  <value>2049</value>
+  <description>
+      Specify the port number used by Hadoop NFS.
+  </description>
+</property>
+
+<property>
+  <name>nfs.mountd.port</name>
+  <value>4242</value>
+  <description>
+      Specify the port number used by Hadoop mount daemon.
+  </description>
+</property>
+
+<property>    
+  <name>nfs.dump.dir</name>
+  <value>/tmp/.hdfs-nfs</value>
+  <description>
+    This directory is used to temporarily save out-of-order writes before
+    writing to HDFS. For each file, the out-of-order writes are dumped after
+    they are accumulated to exceed certain threshold (e.g., 1MB) in memory. 
+    One needs to make sure the directory has enough space.
+  </description>
+</property>
+
+<property>
+  <name>nfs.rtmax</name>
+  <value>1048576</value>
+  <description>This is the maximum size in bytes of a READ request
+    supported by the NFS gateway. If you change this, make sure you
+    also update the nfs mount's rsize(add rsize= # of bytes to the 
+    mount directive).
+  </description>
+</property>
+
+<property>
+  <name>nfs.wtmax</name>
+  <value>1048576</value>
+  <description>This is the maximum size in bytes of a WRITE request
+    supported by the NFS gateway. If you change this, make sure you
+    also update the nfs mount's wsize(add wsize= # of bytes to the 
+    mount directive).
+  </description>
+</property>
+
 <property>
-  <name>dfs.nfs.keytab.file</name>
+  <name>nfs.keytab.file</name>
   <value></value>
   <description>
     *Note*: Advanced property. Change with caution.
@@ -1307,7 +1355,7 @@
 </property>
 
 <property>
-  <name>dfs.nfs.kerberos.principal</name>
+  <name>nfs.kerberos.principal</name>
   <value></value>
   <description>
     *Note*: Advanced property. Change with caution.
@@ -1318,7 +1366,7 @@
 </property>
 
 <property>
-  <name>dfs.nfs.allow.insecure.ports</name>
+  <name>nfs.allow.insecure.ports</name>
   <value>true</value>
   <description>
     When set to false, client connections originating from unprivileged ports
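
The nfs.rtmax/nfs.wtmax descriptions above assume the NFS client mounts with matching transfer sizes. A typical mount, with rsize/wsize set to the 1048576-byte defaults documented above (the gateway host is a placeholder):

  mount -t nfs -o vers=3,proto=tcp,nolock,rsize=1048576,wsize=1048576 <nfs_gateway>:/ /mnt/hdfs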

Propchange: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1598456-1601150

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Sat Jun  7 16:29:10 2014
@@ -45,7 +45,7 @@
     return function (jqxhr, text, err) {
       switch(jqxhr.status) {
         case 401:
-          var msg = '<p>Authentication failed when trying to open ' + url + ': Unauthrozied.</p>';
+          var msg = '<p>Authentication failed when trying to open ' + url + ': Unauthorized.</p>';
           break;
         case 403:
           if(jqxhr.responseJSON !== undefined && jqxhr.responseJSON.RemoteException !== undefined) {

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Sat Jun  7 16:29:10 2014
@@ -76,14 +76,14 @@ HDFS NFS Gateway
 
 ----
   <property>
-    <name>dfs.nfs.keytab.file</name>
+    <name>nfs.keytab.file</name>
     <value>/etc/hadoop/conf/nfsserver.keytab</value> <!-- path to the nfs gateway keytab -->
   </property>
 ----
 
 ----
   <property>
-    <name>dfs.nfs.kerberos.principal</name>
+    <name>nfs.kerberos.principal</name>
     <value>nfsserver/_HOST@YOUR-REALM.COM</value>
   </property>
 ----
@@ -121,7 +121,7 @@ HDFS NFS Gateway
 
 ----
   <property>    
-    <name>dfs.nfs3.dump.dir</name>
+    <name>nfs.dump.dir</name>
     <value>/tmp/.hdfs-nfs</value>
   </property>
 ---- 
@@ -134,7 +134,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.rtmax</name>
+  <name>nfs.rtmax</name>
   <value>1048576</value>
   <description>This is the maximum size in bytes of a READ request
     supported by the NFS gateway. If you change this, make sure you
@@ -146,7 +146,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.wtmax</name>
+  <name>nfs.wtmax</name>
   <value>65536</value>
   <description>This is the maximum size in bytes of a WRITE request
     supported by the NFS gateway. If you change this, make sure you
@@ -167,7 +167,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.exports.allowed.hosts</name>
+  <name>nfs.exports.allowed.hosts</name>
   <value>* rw</value>
 </property>
 ----
@@ -345,7 +345,7 @@ HDFS NFS Gateway
   file in the event one wishes to access the HDFS NFS Gateway from a system with
   a completely disparate set of UIDs/GIDs. By default this file is located at
   "/etc/nfs.map", but a custom location can be configured by setting the
-  "dfs.nfs.static.mapping.file" property to the path of the static mapping file.
+  "nfs.static.mapping.file" property to the path of the static mapping file.
   The format of the static mapping file is similar to what is described in the
   exports(5) manual page, but roughly it is:
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Sat Jun  7 16:29:10 2014
@@ -167,6 +167,9 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
+      if (client != null) {
+        client.close();
+      }
       cluster.shutdown();
     }
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Sat Jun  7 16:29:10 2014
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -1112,5 +1113,11 @@ public class SimulatedFSDataset implemen
   public FsVolumeSpi getVolume(ExtendedBlock b) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
+      FileDescriptor fd, long offset, long nbytes, int flags) {
+    throw new UnsupportedOperationException();
+  }
 }
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java Sat Jun  7 16:29:10 2014
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.DFS
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -251,10 +252,10 @@ public class TestAvailableSpaceVolumeCho
    */
   public void doRandomizedTest(float preferencePercent, int lowSpaceVolumes,
       int highSpaceVolumes) throws Exception {
-    @SuppressWarnings("unchecked")
-    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = 
-        ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
-    
+    Random random = new Random(123L);
+    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
+        new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>(random);
+
     List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
     
     // Volumes with 1MB free space

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Sat Jun  7 16:29:10 2014
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -32,8 +35,11 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -381,6 +387,111 @@ public class FSXAttrBaseTest {
     
     fs.removeXAttr(path, name3);
   }
+
+  /**
+   * Test the listXAttrs api.
+   * listXAttrs on a path that doesn't exist.
+   * listXAttrs on a path with no XAttrs
+   * Check basic functionality.
+   * Check that read access to parent dir is not enough to get xattr names
+   * Check that write access to the parent dir is not enough to get names
+   * Check that execute/scan access to the parent dir is sufficient to get
+   *  xattr names.
+   */
+  @Test(timeout = 120000)
+  public void testListXAttrs() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+      createUserForTesting("user", new String[] {"mygroup"});
+
+    /* listXAttrs in a path that doesn't exist. */
+    try {
+      fs.listXAttrs(path);
+      fail("expected FileNotFoundException");
+    } catch (FileNotFoundException e) {
+      GenericTestUtils.assertExceptionContains("cannot find", e);
+    }
+
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+
+    /* listXAttrs on a path with no XAttrs.*/
+    final List<String> noXAttrs = fs.listXAttrs(path);
+    assertTrue("XAttrs were found?", noXAttrs.size() == 0);
+
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+    /** Check basic functionality. */
+    final List<String> xattrNames = fs.listXAttrs(path);
+    assertTrue(xattrNames.contains(name1));
+    assertTrue(xattrNames.contains(name2));
+    assertTrue(xattrNames.size() == 2);
+
+    /* Check that read access to parent dir is not enough to get xattr names. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    final Path childDir = new Path(path, "child" + pathCount);
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.listXAttrs(childDir);
+            return null;
+          }
+        });
+      fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that write access to the parent dir is not enough to get names.
+     */
+    fs.setPermission(path, new FsPermission((short) 0702));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.listXAttrs(childDir);
+            return null;
+          }
+        });
+      fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute/scan access to the parent dir is sufficient to get
+     * xattr names.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.listXAttrs(childDir);
+          return null;
+        }
+      });
+
+    /*
+     * Test that xattrs in the "trusted" namespace are filtered correctly.
+     */
+    fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          assertTrue(userFs.listXAttrs(childDir).size() == 1);
+          return null;
+        }
+      });
+
+    assertTrue(fs.listXAttrs(childDir).size() == 2);
+  }
   
   /**
    * Steps:

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Sat Jun  7 16:29:10 2014
@@ -153,6 +153,18 @@ public class TestStandbyCheckpoints {
     // do a checkpoint and save one to its local directories.
     HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
 
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        if(tmpOivImgDir.list().length > 0) {
+          return true;
+        }
+        else {
+          return false;
+        }
+      }
+    }, 1000, 60000);
+    
     // It should have saved the oiv image too.
     assertEquals("One file is expected", 1, tmpOivImgDir.list().length);
     

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java Sat Jun  7 16:29:10 2014
@@ -143,10 +143,10 @@ public class TestSnapshotFileLength {
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
+    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE * 2);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
     fis.close();
 
     Path file1snap1 =
@@ -156,21 +156,23 @@ public class TestSnapshotFileLength {
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
     fis.close();
 
-    PrintStream psBackup = System.out;
+    PrintStream outBackup = System.out;
+    PrintStream errBackup = System.err;
     ByteArrayOutputStream bao = new ByteArrayOutputStream();
     System.setOut(new PrintStream(bao));
     System.setErr(new PrintStream(bao));
     // Make sure we can cat the file up to the snapshot length
     FsShell shell = new FsShell();
-    try{
+    try {
       ToolRunner.run(conf, shell, new String[] { "-cat",
       "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals(bao.size(), BLOCKSIZE);
-    }finally{
-      System.setOut(psBackup);
+      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
+    } finally {
+      System.setOut(outBackup);
+      System.setErr(errBackup);
     }
   }
 }

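The change above also restores System.err, which the earlier version left redirected into the byte buffer after the test finished. A minimal sketch of the capture-and-restore idiom around FsShell; the class and method names are illustrative:

    // Sketch of the idiom; not part of the patch.
    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CaptureShellOutputSketch {
      public static String runAndCapture(Configuration conf, String... args)
          throws Exception {
        PrintStream outBackup = System.out;
        PrintStream errBackup = System.err;
        ByteArrayOutputStream bao = new ByteArrayOutputStream();
        System.setOut(new PrintStream(bao));
        System.setErr(new PrintStream(bao));
        try {
          ToolRunner.run(conf, new FsShell(), args);
          return bao.toString();
        } finally {
          // Restore BOTH streams, even if the command throws.
          System.setOut(outBackup);
          System.setErr(errBackup);
        }
      }
    }
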
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Sat Jun  7 16:29:10 2014
@@ -234,6 +234,18 @@ public class TestJsonUtil {
           parsedXAttrMap.get(entry.getKey()));
     }
   }
+  
+  @Test
+  public void testGetXAttrFromJson() throws IOException {
+    String jsonString = 
+        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
+        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
+    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+    
+    // Get xattr: user.a2
+    byte[] value = JsonUtil.getXAttr(json, "user.a2");
+    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
+  }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {

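The values in the JSON are XAttrCodec hex strings: 0x313131 is the three ASCII bytes of "111". A minimal round-trip sketch using the same codec; the class name is illustrative:

    // Sketch only; not part of the patch.
    import org.apache.hadoop.fs.XAttrCodec;

    public class XAttrHexSketch {
      public static void main(String[] args) throws Exception {
        // A leading "0x" selects hex decoding: 0x31 is ASCII '1'.
        byte[] value = XAttrCodec.decodeValue("0x313131");
        System.out.println(new String(value, "UTF-8"));  // prints: 111
        // Encoding back with the HEX codec restores the "0x..." form.
        System.out.println(XAttrCodec.encodeValue(value, XAttrCodec.HEX));
      }
    }
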
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Sat Jun  7 16:29:10 2014
@@ -54,7 +54,8 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -117,14 +118,14 @@ public class TestNetworkTopology {
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
     
     // array contains both local node & local rack node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -133,7 +134,7 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -142,21 +143,48 @@
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
-    
+
     // array contains local rack node which happens to be in position 0
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
-    // peudoSortByDistance does not take the "data center" layer into consideration 
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[3]);
+    assertTrue(testNodes[2] == dataNodes[5]);
+
+    // Same as previous, but with a different random seed to test randomization
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEAD);
+    // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[3]);
+
+    // Array is just local rack nodes
+    // Expect a random first node depending on the seed (normally the block ID).
+    DatanodeDescriptor first = null;
+    boolean foundRandom = false;
+    for (int i = 5; i <= 7; i++) {
+      testNodes[0] = dataNodes[5];
+      testNodes[1] = dataNodes[6];
+      testNodes[2] = dataNodes[7];
+      cluster.sortByDistance(dataNodes[i], testNodes, 0xBEADED + i);
+      if (first == null) {
+        first = testNodes[0];
+      } else if (first != testNodes[0]) {
+        foundRandom = true;
+        break;
+      }
+    }
+    assertTrue("Expected to find a different first location", foundRandom);
   }
   
   @Test

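sortByDistance replaces pseudoSortByDistance and takes a seed (in production the block ID) so that ties among equally distant nodes are shuffled deterministically: the same seed always yields the same order, which is what lets the assertions above pin exact positions. A minimal sketch of that property, assuming the three-argument overload exercised by the test; the helper name is illustrative:

    // Sketch only; not part of the patch.
    import java.util.Arrays;
    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;

    public class SeededSortSketch {
      // Same reader, same candidates, same seed => same order; a different
      // seed may break ties among equally distant nodes differently.
      public static boolean sameOrderForSameSeed(NetworkTopology cluster,
          Node reader, Node[] candidates, long seed) {
        Node[] a = candidates.clone();
        Node[] b = candidates.clone();
        cluster.sortByDistance(reader, a, seed);
        cluster.sortByDistance(reader, b, seed);
        return Arrays.equals(a, b);
      }
    }
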
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Sat Jun  7 16:29:10 2014
@@ -16118,6 +16118,364 @@
       </comparators>
     </test>
 
+    <!-- DFS tests
+        Must come before moveFromLocal tests until HDFS-6471 is fixed -->
+    <test>
+      <description>appendToFile</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -touchz /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -appendToFile CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir1/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>text</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -text /user/USERNAME/dir1/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>rmdir</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -rmdir /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>rmdir with ignore-fail-on-non-empty</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -rmdir --ignore-fail-on-non-empty /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>df</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -df /user/USERNAME</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Filesystem( )*Size( )*Used( )*Available( )*Use%.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>expunge</description>
+      <test-commands>
+        <command>-fs NAMENODE -expunge</command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getmerge</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1 data</command>
+        <command>-cat data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for snapshots -->
+    <test>
+      <description>allowSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Allowing snaphot on /user/USERNAME/dir1 succeeded</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disallowSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Disallowing snaphot on /user/USERNAME/dir1 succeeded</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Created snapshot /user/USERNAME/dir1/.snapshot/snapshot1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>renameSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -renameSnapshot /user/USERNAME/dir1 snapshot1 snapshot2</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/dir1/.snapshot</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot2</command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/USERNAME/dir1/.snapshot/snapshot2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>deleteSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/dir1/.snapshot</command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- DFSadmin tests -->
+    <test>
+      <description>refreshUserToGroupsMappings</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshUserToGroupsMappings</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>refreshSuperUserGroupsConfiguration</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshSuperUserGroupsConfiguration</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -count -q /user/USERNAME/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( )*3.*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>clrQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /user/USERNAME/dir1</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrQuota /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -count -q /user/USERNAME/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( )*none.*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setSpaceQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1G /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setBalancerBandwidth</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setBalancerBandwidth 104857600</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>finalizeUpgrade</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -finalizeUpgrade</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <!-- Tests for moveFromLocal -->
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(absolute path)</description>


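Each <command> element above is ultimately handed to FsShell by the CLI test harness, so any of these cases can be reproduced programmatically. A minimal sketch of the first two commands of the appendToFile case; the class name and the fs.defaultFS value are illustrative (the harness substitutes NAMENODE and USERNAME at run time):

    // Sketch only; not part of the patch.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CliSmokeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        // ToolRunner returns the shell's exit code (0 on success).
        FsShell shell = new FsShell();
        int rc = ToolRunner.run(conf, shell,
            new String[] { "-mkdir", "-p", "/user/alice/dir1" });
        if (rc == 0) {
          rc = ToolRunner.run(conf, shell,
              new String[] { "-touchz", "/user/alice/dir1/file0" });
        }
        System.exit(rc);
      }
    }
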
