From: vinayakum...@apache.org
Subject: svn commit: r1601151 - in /hadoop/common/branches/HDFS-5442/hadoop-common-project: hadoop-common/ hadoop-common/src/main/java/ hadoop-common/src/main/java/org/apache/hadoop/conf/ hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ hadoop-comm...
Date: Sat, 07 Jun 2014 16:29:16 GMT
Author: vinayakumarb
Date: Sat Jun  7 16:29:10 2014
New Revision: 1601151

URL: http://svn.apache.org/r1601151
Log:
Merged changes from trunk

Added:
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
      - copied unchanged from r1601150, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java
      - copied unchanged from r1601150, hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java
Modified:
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
    hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt Sat Jun  7 16:29:10 2014
@@ -334,6 +334,11 @@ Trunk (Unreleased)
     HADOOP-10625. Trim configuration names when putting/getting them
     to properties. (Wangda Tan via xgong)
 
+    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
+
+    HADOOP-10611. KMS, keyVersion name should not be assumed to be 
+    keyName@versionNumber. (tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -366,6 +371,8 @@ Release 2.5.0 - UNRELEASED
 
     HADOOP-10498. Add support for proxy server. (daryn)
 
+    HADOOP-9704. Write metrics sink plugin for Hadoop/Graphite (Chu Tong, Alex Newman and Babak Behzad via raviprak)
+
   IMPROVEMENTS
 
     HADOOP-10451. Remove unused field and imports from SaslRpcServer.
@@ -522,6 +529,13 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10638. Updating hadoop-daemon.sh to work as expected when nfs is
     started as a privileged user. (Manikandan Narayanaswamy via atm)
 
+    HADOOP-10630. Possible race condition in RetryInvocationHandler. (jing9)
+
+    HADOOP-10658. SSLFactory expects truststores being configured. (tucu via atm)
+
+    HADOOP-10647. String Format Exception in SwiftNativeFileSystemStore.java.
+    (Gene Kim via stevel)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt:r1600970
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1598456-1601150

Propchange: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1598456-1601150

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java Sat Jun  7 16:29:10 2014
@@ -423,7 +423,9 @@ public class Configuration implements It
       new DeprecationDelta("fs.default.name", 
         CommonConfigurationKeys.FS_DEFAULT_NAME_KEY),
       new DeprecationDelta("dfs.umaskmode",
-        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY)
+        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY),
+      new DeprecationDelta("dfs.nfs.exports.allowed.hosts",
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY)
     };
 
   /**

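The new DeprecationDelta means the old "dfs.nfs.exports.allowed.hosts" key is
transparently redirected to the new "nfs.exports.allowed.hosts" key. A minimal
sketch of the effect (the demo class and main method are hypothetical, added
only for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class DeprecatedKeyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Setting the deprecated key...
        conf.set("dfs.nfs.exports.allowed.hosts", "192.168.0.0/22 rw");
        // ...is readable under the new key via the deprecation mapping.
        System.out.println(
            conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY));
      }
    }
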
Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java Sat Jun  7 16:29:10 2014
@@ -517,8 +517,4 @@ public class KMSClientProvider extends K
     // the server should not keep in memory state on behalf of clients either.
   }
 
-  @VisibleForTesting
-  public static String buildVersionName(String name, int version) {
-    return KeyProvider.buildVersionName(name, version);
-  }
 }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java Sat Jun  7 16:29:10 2014
@@ -1169,6 +1169,30 @@ public abstract class AbstractFileSystem
   }
 
   /**
+   * Get all of the xattr names for a file or directory.
+   * Only the xattr names for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for the "user" and "trusted"
+   * namespaces.
+   * The xattr names in the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path)
+          throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
+  /**
    * Remove an xattr of a file or directory.
    * The name must be prefixed with user/trusted/security/system and
    * followed by ".". For example, "user.attr".

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java Sat Jun  7 16:29:10 2014
@@ -265,4 +265,10 @@ public class CommonConfigurationKeys ext
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
   public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
       "rpc.metrics.percentiles.intervals";
+  
+  /** Allowed hosts for nfs exports */
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
+
 }

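The separator constant implies that a multi-entry exports value is split on
";". A small hedged sketch of that parsing (the demo class is hypothetical;
the constant is the one added above):

    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class ExportsSplitDemo {
      public static void main(String[] args) {
        String value = "192.168.0.0/22 rw;host1.test.org ro";
        for (String entry : value.split(
            CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR)) {
          System.out.println(entry.trim()); // one host/privilege pair per entry
        }
      }
    }
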
Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java Sat Jun  7 16:29:10 2014
@@ -2403,8 +2403,8 @@ public abstract class FileSystem extends
    * <p/>
    * The access permissions of an xattr in the "user" namespace are
    * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * An xattr can only be set if the logged-in user has the correct permissions.
+   * If the xattr exists, it is replaced.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
    * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -2422,7 +2422,7 @@ public abstract class FileSystem extends
   }
 
   /**
-   * Get an xattr for a file or directory.
+   * Get an xattr name and value for a file or directory.
    * The name must be prefixed with user/trusted/security/system and
    * followed by ".". For example, "user.attr".
    * <p/>
@@ -2432,7 +2432,8 @@ public abstract class FileSystem extends
    * The xattrs of the "security" and "system" namespaces are only used/exposed 
    * internally by/to the FS impl.
    * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
+   * An xattr will only be returned if the logged-in user has the
+   * correct permissions.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
    * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -2448,13 +2449,13 @@ public abstract class FileSystem extends
   }
 
   /**
-   * Get all of the xattrs for a file or directory.
-   * Only those xattrs for which the logged-in user has permissions to view
+   * Get all of the xattr name/value pairs for a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
    * A regular user can only get xattrs for the "user" namespace.
    * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
+   * The xattrs of the "security" and "system" namespaces are only used/exposed
    * internally by/to the FS impl.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
@@ -2470,13 +2471,13 @@ public abstract class FileSystem extends
   }
 
   /**
-   * Get all of the xattrs for a file or directory.
-   * Only those xattrs for which the logged-in user has permissions to view
+   * Get all of the xattr name/value pairs for a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
    * A regular user can only get xattrs for the "user" namespace.
    * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
+   * The xattrs of the "security" and "system" namespaces are only used/exposed
    * internally by/to the FS impl.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
@@ -2494,6 +2495,29 @@ public abstract class FileSystem extends
   }
 
   /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for "user" and "trusted"
+   * namespaces.
+   * The xattrs of the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
+  /**
    * Remove an xattr of a file or directory.
    * The name must be prefixed with user/trusted/security/system and
    * followed by ".". For example, "user.attr".

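Taken together with getXAttr, the new listXAttrs call allows enumerating and
then fetching attributes. A minimal usage sketch (the path is hypothetical,
and the call only succeeds on FileSystem implementations that override
listXAttrs; the base class throws UnsupportedOperationException as shown
above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListXAttrsDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/alice/file.txt"); // hypothetical path
        // Only names visible to the logged-in user are returned, e.g. "user.attr".
        for (String name : fs.listXAttrs(p)) {
          byte[] value = fs.getXAttr(p, name);
          System.out.println(name + " = " + new String(value, "UTF-8"));
        }
      }
    }
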
Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java Sat Jun  7 16:29:10 2014
@@ -573,6 +573,11 @@ public class FilterFileSystem extends Fi
   }
 
   @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return fs.listXAttrs(path);
+  }
+
+  @Override
   public void removeXAttr(Path path, String name) throws IOException {
     fs.removeXAttr(path, name);
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java Sat Jun  7 16:29:10 2014
@@ -347,6 +347,11 @@ public abstract class FilterFs extends A
   }
 
   @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return myFs.listXAttrs(path);
+  }
+
+  @Override
   public void removeXAttr(Path path, String name) throws IOException {
     myFs.removeXAttr(path, name);
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java Sat Jun  7 16:29:10 2014
@@ -338,6 +338,11 @@ class ChRootedFileSystem extends FilterF
   }
 
   @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return super.listXAttrs(fullPath(path));
+  }
+
+  @Override
   public void removeXAttr(Path path, String name) throws IOException {
     super.removeXAttr(fullPath(path), name);
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java Sat Jun  7 16:29:10 2014
@@ -552,6 +552,13 @@ public class ViewFileSystem extends File
   }
 
   @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.listXAttrs(res.remainingPath);
+  }
+
+  @Override
   public void removeXAttr(Path path, String name) throws IOException {
     InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
         true);

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java Sat Jun  7 16:29:10 2014
@@ -36,7 +36,6 @@ import org.apache.hadoop.ipc.ProtocolTra
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.RpcInvocationHandler;
-import org.apache.hadoop.util.ThreadUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -160,11 +159,11 @@ public class RetryInvocationHandler<T> i
               if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
                 proxyProvider.performFailover(currentProxy.proxy);
                 proxyProviderFailoverCount++;
-                currentProxy = proxyProvider.getProxy();
               } else {
                 LOG.warn("A failover has occurred since the start of this method"
                     + " invocation attempt.");
               }
+              currentProxy = proxyProvider.getProxy();
             }
             invocationFailoverCount++;
           }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java Sat Jun  7 16:29:10 2014
@@ -19,8 +19,10 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Random;
+import java.util.TreeMap;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -32,6 +34,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
 /** The class represents a cluster of computers with a tree hierarchical
  * network topology.
  * For example, a cluster may consist of many data centers filled 
@@ -667,7 +672,23 @@ public class NetworkTopology {
     return node1.getParent()==node2.getParent();
   }
 
-  final protected static Random r = new Random();
+  private static final ThreadLocal<Random> r = new ThreadLocal<Random>();
+
+  /**
+   * Getter for thread-local Random, which provides better performance than
+   * a shared Random (even though Random is thread-safe).
+   *
+   * @return Thread-local Random.
+   */
+  protected Random getRandom() {
+    Random rand = r.get();
+    if (rand == null) {
+      rand = new Random();
+      r.set(rand);
+    }
+    return rand;
+  }
+
   /** randomly choose one node from <i>scope</i>
   * if scope starts with ~, choose one from all nodes except for the
    * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
@@ -717,7 +738,7 @@ public class NetworkTopology {
           "Failed to find datanode (scope=\"" + String.valueOf(scope) +
           "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
     }
-    int leaveIndex = r.nextInt(numOfDatanodes);
+    int leaveIndex = getRandom().nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
 
@@ -824,61 +845,79 @@ public class NetworkTopology {
     return networkLocation.substring(index);
   }
 
-  /** swap two array items */
-  static protected void swap(Node[] nodes, int i, int j) {
-    Node tempNode;
-    tempNode = nodes[j];
-    nodes[j] = nodes[i];
-    nodes[i] = tempNode;
-  }
-  
-  /** Sort nodes array by their distances to <i>reader</i>
-   * It linearly scans the array, if a local node is found, swap it with
-   * the first element of the array.
-   * If a local rack node is found, swap it with the first element following
-   * the local node.
-   * If neither local node or local rack node is found, put a random replica
-   * location at position 0.
-   * It leaves the rest nodes untouched.
-   * @param reader the node that wishes to read a block from one of the nodes
-   * @param nodes the list of nodes containing data for the reader
-   */
-  public void pseudoSortByDistance( Node reader, Node[] nodes ) {
-    int tempIndex = 0;
-    int localRackNode = -1;
-    if (reader != null ) {
-      //scan the array to find the local node & local rack node
-      for(int i=0; i<nodes.length; i++) {
-        if(tempIndex == 0 && reader == nodes[i]) { //local node
-          //swap the local node and the node at position 0
-          if( i != 0 ) {
-            swap(nodes, tempIndex, i);
-          }
-          tempIndex=1;
-          if(localRackNode != -1 ) {
-            if(localRackNode == 0) {
-              localRackNode = i;
-            }
-            break;
-          }
-        } else if(localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
-          //local rack
-          localRackNode = i;
-          if(tempIndex != 0 ) break;
-        }
+  /**
+   * Returns an integer weight which specifies how far away {node} is from
+   * {reader}. A lower value signifies that a node is closer.
+   * 
+   * @param reader Node where data will be read
+   * @param node Replica of data
+   * @return weight
+   */
+  protected int getWeight(Node reader, Node node) {
+    // 0 is local, 1 is same rack, 2 is off rack
+    // Start off by initializing to off rack
+    int weight = 2;
+    if (reader != null) {
+      if (reader == node) {
+        weight = 0;
+      } else if (isOnSameRack(reader, node)) {
+        weight = 1;
       }
+    }
+    return weight;
+  }
 
-      // swap the local rack node and the node at position tempIndex
-      if(localRackNode != -1 && localRackNode != tempIndex ) {
-        swap(nodes, tempIndex, localRackNode);
-        tempIndex++;
+  /**
+   * Sort nodes array by network distance to <i>reader</i>.
+   * <p/>
+   * In a three-level topology, a node can be local, on the same rack, or
+   * on a different rack from the reader. Sorting the nodes based on network
+   * distance from the reader reduces network traffic and improves performance.
+   * <p/>
+   * As an additional twist, we also randomize the nodes at each network
+   * distance using the provided random seed. This helps with load balancing
+   * when there is data skew.
+   * 
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param seed Used to seed the pseudo-random generator that randomizes the
+   *          set of nodes at each network distance.
+   */
+  public void sortByDistance(Node reader, Node[] nodes, long seed) {
+    // Compute the sort weight of each node in the array
+    int[] weights = new int[nodes.length];
+    for (int i=0; i<nodes.length; i++) {
+      weights[i] = getWeight(reader, nodes[i]);
+    }
+    // Add weight/node pairs to a TreeMap to sort
+    TreeMap<Integer, List<Node>> tree = new TreeMap<Integer, List<Node>>();
+    for (int i=0; i<nodes.length; i++) {
+      int weight = weights[i];
+      Node node = nodes[i];
+      List<Node> list = tree.get(weight);
+      if (list == null) {
+        list = Lists.newArrayListWithExpectedSize(1);
+        tree.put(weight, list);
+      }
+      list.add(node);
+    }
+
+    // Seed is normally the block id
+    // This means we use the same pseudo-random order for each block, for
+    // potentially better page cache usage.
+    Random rand = getRandom();
+    rand.setSeed(seed);
+    int idx = 0;
+    for (List<Node> list: tree.values()) {
+      if (list != null) {
+        Collections.shuffle(list, rand);
+        for (Node n: list) {
+          nodes[idx] = n;
+          idx++;
+        }
       }
     }
-    
-    // put a random node at position 0 if it is not a local/local-rack node
-    if(tempIndex == 0 && localRackNode == -1 && nodes.length != 0) {
-      swap(nodes, 0, r.nextInt(nodes.length));
-    }
+    Preconditions.checkState(idx == nodes.length,
+        "Sorted the wrong number of nodes!");
   }
-  
 }

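The replacement strategy is: bucket nodes by weight in a TreeMap (ascending
distance), then shuffle within each bucket using the seeded Random. A
self-contained sketch of that logic on plain strings, independent of the
Hadoop classes (all names here are illustrative only):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;
    import java.util.TreeMap;

    public class SortByDistanceSketch {
      // weights[i] is the network distance of nodes[i]: 0 local, 1 same
      // rack, 2 off rack. Lower weights sort first; ties are shuffled.
      static void sortByWeight(String[] nodes, int[] weights, long seed) {
        TreeMap<Integer, List<String>> tree = new TreeMap<Integer, List<String>>();
        for (int i = 0; i < nodes.length; i++) {
          List<String> bucket = tree.get(weights[i]);
          if (bucket == null) {
            bucket = new ArrayList<String>();
            tree.put(weights[i], bucket);
          }
          bucket.add(nodes[i]);
        }
        Random rand = new Random(seed); // e.g. seeded with the block id
        int idx = 0;
        for (List<String> bucket : tree.values()) {
          Collections.shuffle(bucket, rand); // randomize within each distance
          for (String n : bucket) {
            nodes[idx++] = n;
          }
        }
      }
    }
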
Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java Sat Jun  7 16:29:10 2014
@@ -248,25 +248,41 @@ public class NetworkTopologyWithNodeGrou
     }
   }
 
-  /** Sort nodes array by their distances to <i>reader</i>
-   * It linearly scans the array, if a local node is found, swap it with
-   * the first element of the array.
-   * If a local node group node is found, swap it with the first element 
-   * following the local node.
-   * If a local rack node is found, swap it with the first element following
-   * the local node group node.
-   * If neither local node, node group node or local rack node is found, put a 
-   * random replica location at position 0.
-   * It leaves the rest nodes untouched.
-   * @param reader the node that wishes to read a block from one of the nodes
-   * @param nodes the list of nodes containing data for the reader
-   */
   @Override
-  public void pseudoSortByDistance( Node reader, Node[] nodes ) {
+  protected int getWeight(Node reader, Node node) {
+    // 0 is local, 1 is same node group, 2 is same rack, 3 is off rack
+    // Start off by initializing to off rack
+    int weight = 3;
+    if (reader != null) {
+      if (reader == node) {
+        weight = 0;
+      } else if (isOnSameNodeGroup(reader, node)) {
+        weight = 1;
+      } else if (isOnSameRack(reader, node)) {
+        weight = 2;
+      }
+    }
+    return weight;
+  }
 
+  /**
+   * Sort nodes array by their distances to <i>reader</i>.
+   * <p/>
+   * This is the same as
+   * {@link NetworkTopology#sortByDistance(Node, Node[], long)} except with a
+   * four-level network topology which contains the additional network distance
+   * of a "node group" which is between local and same rack.
+   * 
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param seed Used to seed the pseudo-random generator that randomizes the
+   *          set of nodes at each network distance.
+   */
+  @Override
+  public void sortByDistance( Node reader, Node[] nodes, long seed) {
+    // If reader is not a datanode (not in NetworkTopology tree), we need to
+    // replace this reader with a sibling leaf node in tree.
     if (reader != null && !this.contains(reader)) {
-      // if reader is not a datanode (not in NetworkTopology tree), we will 
-      // replace this reader with a sibling leaf node in tree.
       Node nodeGroup = getNode(reader.getNetworkLocation());
       if (nodeGroup != null && nodeGroup instanceof InnerNode) {
         InnerNode parentNode = (InnerNode) nodeGroup;
@@ -276,62 +292,7 @@ public class NetworkTopologyWithNodeGrou
         return;
       }
     }
-    int tempIndex = 0;
-    int localRackNode = -1;
-    int localNodeGroupNode = -1;
-    if (reader != null) {  
-      //scan the array to find the local node & local rack node
-      for (int i = 0; i < nodes.length; i++) {
-        if (tempIndex == 0 && reader == nodes[i]) { //local node
-          //swap the local node and the node at position 0
-          if (i != 0) {
-            swap(nodes, tempIndex, i);
-          }
-          tempIndex=1;
-
-          if (localRackNode != -1 && (localNodeGroupNode !=-1)) {
-            if (localRackNode == 0) {
-              localRackNode = i;
-            }
-            if (localNodeGroupNode == 0) {
-              localNodeGroupNode = i;
-            }
-            break;
-          }
-        } else if (localNodeGroupNode == -1 && isOnSameNodeGroup(reader, 
-            nodes[i])) {
-          //local node group
-          localNodeGroupNode = i;
-          // node local and rack local are already found
-          if(tempIndex != 0 && localRackNode != -1) break;
-        } else if (localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
-          localRackNode = i;
-          if (tempIndex != 0 && localNodeGroupNode != -1) break;
-        }
-      }
-
-      // swap the local nodegroup node and the node at position tempIndex
-      if(localNodeGroupNode != -1 && localNodeGroupNode != tempIndex) {
-        swap(nodes, tempIndex, localNodeGroupNode);
-        if (localRackNode == tempIndex) {
-          localRackNode = localNodeGroupNode;
-        }
-        tempIndex++;
-      }
-
-      // swap the local rack node and the node at position tempIndex
-      if(localRackNode != -1 && localRackNode != tempIndex) {
-        swap(nodes, tempIndex, localRackNode);
-        tempIndex++;
-      }
-    }
-
-    // put a random node at position 0 if there is not a local/local-nodegroup/
-    // local-rack node
-    if (tempIndex == 0 && localNodeGroupNode == -1 && localRackNode == -1
-        && nodes.length != 0) {
-      swap(nodes, 0, r.nextInt(nodes.length));
-    }
+    super.sortByDistance(reader, nodes, seed);
   }
 
   /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java Sat Jun  7 16:29:10 2014
@@ -188,33 +188,33 @@ public class FileBasedKeyStoresFactory i
     String locationProperty =
       resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY);
     String truststoreLocation = conf.get(locationProperty, "");
-    if (truststoreLocation.isEmpty()) {
-      throw new GeneralSecurityException("The property '" + locationProperty +
-        "' has not been set in the ssl configuration file.");
-    }
-
-    String passwordProperty = resolvePropertyName(mode,
-                                                  SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
-    String truststorePassword = conf.get(passwordProperty, "");
-    if (truststorePassword.isEmpty()) {
-      throw new GeneralSecurityException("The property '" + passwordProperty +
-        "' has not been set in the ssl configuration file.");
+    if (!truststoreLocation.isEmpty()) {
+      String passwordProperty = resolvePropertyName(mode,
+          SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
+      String truststorePassword = conf.get(passwordProperty, "");
+      if (truststorePassword.isEmpty()) {
+        throw new GeneralSecurityException("The property '" + passwordProperty +
+            "' has not been set in the ssl configuration file.");
+      }
+      long truststoreReloadInterval =
+          conf.getLong(
+              resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
+              DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
+
+      LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
+
+      trustManager = new ReloadingX509TrustManager(truststoreType,
+          truststoreLocation,
+          truststorePassword,
+          truststoreReloadInterval);
+      trustManager.init();
+      LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+      trustManagers = new TrustManager[]{trustManager};
+    } else {
+      LOG.warn("The property '" + locationProperty + "' has not been set, " +
+          "no TrustStore will be loaded");
+      trustManagers = null;
     }
-    long truststoreReloadInterval =
-      conf.getLong(
-        resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
-        DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
-
-    LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
-
-    trustManager = new ReloadingX509TrustManager(truststoreType,
-                                                 truststoreLocation,
-                                                 truststorePassword,
-                                                 truststoreReloadInterval);
-    trustManager.init();
-    LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
-
-    trustManagers = new TrustManager[]{trustManager};
   }
 
   /**

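With this change the truststore properties become optional: if
ssl.{server|client}.truststore.location is unset, the factory logs a warning
and leaves trustManagers null instead of failing at init. A hypothetical
minimal ssl-server.xml that now initializes successfully with only a
keystore configured:

    <configuration>
      <property>
        <name>ssl.server.keystore.location</name>
        <value>/etc/hadoop/serverKS.jks</value> <!-- hypothetical path -->
      </property>
      <property>
        <name>ssl.server.keystore.password</name>
        <value>serverP</value> <!-- hypothetical password -->
      </property>
    </configuration>
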
Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Sat Jun  7 16:29:10 2014
@@ -1309,18 +1309,17 @@
 </property>
 
 <property>
-  <name>nfs3.server.port</name>
-  <value>2049</value>
+  <name>nfs.exports.allowed.hosts</name>
+  <value>* rw</value>
   <description>
-      Specify the port number used by Hadoop NFS.
-  </description>
-</property>
-
-<property>
-  <name>nfs3.mountd.port</name>
-  <value>4242</value>
-  <description>
-      Specify the port number used by Hadoop mount daemon.
+    By default, the export can be mounted by any client. The value string
+    contains a machine name and an access privilege, separated by whitespace
+    characters. The machine name format can be a single host, a Java regular
+    expression, or an IPv4 address. The access privilege uses rw or ro to
+    specify read/write or read-only access for the matching machines. If the
+    access privilege is not provided, the default is read-only. Entries are separated by ";".
+    For example: "192.168.0.0/22 rw ; host.*\.example\.com ; host1.test.org ro;".
+    Only the NFS gateway needs to be restarted after this property is updated.
   </description>
 </property>
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java Sat Jun  7 16:29:10 2014
@@ -196,6 +196,8 @@ public class TestHarFileSystem {
     public Map<String, byte[]> getXAttrs(Path path, List<String> names)
         throws IOException;
 
+    public List<String> listXAttrs(Path path) throws IOException;
+
     public void removeXAttr(Path path, String name) throws IOException;
 
     public AclStatus getAclStatus(Path path) throws IOException;

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java Sat Jun  7 16:29:10 2014
@@ -96,7 +96,7 @@ public class TestNetworkTopologyWithNode
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     NodeBase[] testNodes = new NodeBase[4];
 
     // array contains both local node, local node group & local rack node
@@ -104,7 +104,7 @@ public class TestNetworkTopologyWithNode
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[3];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -115,7 +115,7 @@ public class TestNetworkTopologyWithNode
     testNodes[1] = dataNodes[4];
     testNodes[2] = dataNodes[1];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
 
@@ -124,7 +124,7 @@ public class TestNetworkTopologyWithNode
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
 
@@ -133,7 +133,7 @@ public class TestNetworkTopologyWithNode
     testNodes[1] = dataNodes[7];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(computeNode, testNodes );
+    cluster.sortByDistance(computeNode, testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java Sat Jun  7 16:29:10 2014
@@ -76,8 +76,8 @@ public class KeyStoreTestUtil {
   * @throws GeneralSecurityException thrown if a security error occurred.
    */
   public static X509Certificate generateCertificate(String dn, KeyPair pair,
-                                                    int days, String algorithm)
-    throws GeneralSecurityException, IOException {
+      int days, String algorithm)
+      throws GeneralSecurityException, IOException {
     PrivateKey privkey = pair.getPrivate();
     X509CertInfo info = new X509CertInfo();
     Date from = new Date();
@@ -92,7 +92,7 @@ public class KeyStoreTestUtil {
     info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner));
     info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic()));
     info
-      .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
+        .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
     AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid);
     info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo));
 
@@ -103,30 +103,30 @@ public class KeyStoreTestUtil {
     // Update the algorithm, and re-sign.
     algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG);
     info
-      .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
-           algo);
+        .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
+            algo);
     cert = new X509CertImpl(info);
     cert.sign(privkey, algorithm);
     return cert;
   }
 
   public static KeyPair generateKeyPair(String algorithm)
-    throws NoSuchAlgorithmException {
+      throws NoSuchAlgorithmException {
     KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm);
     keyGen.initialize(1024);
     return keyGen.genKeyPair();
   }
 
   private static KeyStore createEmptyKeyStore()
-    throws GeneralSecurityException, IOException {
+      throws GeneralSecurityException, IOException {
     KeyStore ks = KeyStore.getInstance("JKS");
     ks.load(null, null); // initialize
     return ks;
   }
 
   private static void saveKeyStore(KeyStore ks, String filename,
-                                   String password)
-    throws GeneralSecurityException, IOException {
+      String password)
+      throws GeneralSecurityException, IOException {
     FileOutputStream out = new FileOutputStream(filename);
     try {
       ks.store(out, password.toCharArray());
@@ -136,18 +136,18 @@ public class KeyStoreTestUtil {
   }
 
   public static void createKeyStore(String filename,
-                                    String password, String alias,
-                                    Key privateKey, Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String alias,
+      Key privateKey, Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setKeyEntry(alias, privateKey, password.toCharArray(),
-                   new Certificate[]{cert});
+        new Certificate[]{cert});
     saveKeyStore(ks, filename, password);
   }
 
   /**
    * Creates a keystore with a single key and saves it to a file.
-   * 
+   *
    * @param filename String file to save
    * @param password String store password to set on keystore
    * @param keyPassword String key password to set on key
@@ -158,27 +158,27 @@ public class KeyStoreTestUtil {
    * @throws IOException if there is an I/O error saving the file
    */
   public static void createKeyStore(String filename,
-                                    String password, String keyPassword, String alias,
-                                    Key privateKey, Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String keyPassword, String alias,
+      Key privateKey, Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
-                   new Certificate[]{cert});
+        new Certificate[]{cert});
     saveKeyStore(ks, filename, password);
   }
 
   public static void createTrustStore(String filename,
-                                      String password, String alias,
-                                      Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String alias,
+      Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setCertificateEntry(alias, cert);
     saveKeyStore(ks, filename, password);
   }
 
   public static <T extends Certificate> void createTrustStore(
-    String filename, String password, Map<String, T> certs)
-    throws GeneralSecurityException, IOException {
+      String filename, String password, Map<String, T> certs)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     for (Map.Entry<String, T> cert : certs.entrySet()) {
       ks.setCertificateEntry(cert.getKey(), cert.getValue());
@@ -187,7 +187,7 @@ public class KeyStoreTestUtil {
   }
 
   public static void cleanupSSLConfig(String keystoresDir, String sslConfDir)
-    throws Exception {
+      throws Exception {
     File f = new File(keystoresDir + "/clientKS.jks");
     f.delete();
     f = new File(keystoresDir + "/serverKS.jks");
@@ -196,7 +196,7 @@ public class KeyStoreTestUtil {
     f.delete();
     f = new File(sslConfDir + "/ssl-client.xml");
     f.delete();
-    f = new File(sslConfDir +  "/ssl-server.xml");
+    f = new File(sslConfDir + "/ssl-server.xml");
     f.delete();
   }
 
@@ -205,22 +205,42 @@ public class KeyStoreTestUtil {
    * SSLFactory.  This includes keys, certs, keystores, truststores, the server
    * SSL configuration file, the client SSL configuration file, and the master
    * configuration file read by the SSLFactory.
-   * 
+   *
    * @param keystoresDir String directory to save keystores
    * @param sslConfDir String directory to save SSL configuration files
    * @param conf Configuration master configuration to be used by an SSLFactory,
-   *   which will be mutated by this method
+   * which will be mutated by this method
    * @param useClientCert boolean true to make the client present a cert in the
-   *   SSL handshake
+   * SSL handshake
    */
   public static void setupSSLConfig(String keystoresDir, String sslConfDir,
-                                    Configuration conf, boolean useClientCert)
+      Configuration conf, boolean useClientCert) throws Exception {
+    setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, true);
+  }
+
+  /**
+   * Performs complete setup of SSL configuration in preparation for testing an
+   * SSLFactory.  This includes keys, certs, keystores, truststores, the server
+   * SSL configuration file, the client SSL configuration file, and the master
+   * configuration file read by the SSLFactory.
+   *
+   * @param keystoresDir String directory to save keystores
+   * @param sslConfDir String directory to save SSL configuration files
+   * @param conf Configuration master configuration to be used by an SSLFactory,
+   * which will be mutated by this method
+   * @param useClientCert boolean true to make the client present a cert in the
+   * SSL handshake
+   * @param trustStore boolean true to create truststore, false not to create it
+   */
+  public static void setupSSLConfig(String keystoresDir, String sslConfDir,
+                                    Configuration conf, boolean useClientCert,
+      boolean trustStore)
     throws Exception {
     String clientKS = keystoresDir + "/clientKS.jks";
     String clientPassword = "clientP";
     String serverKS = keystoresDir + "/serverKS.jks";
     String serverPassword = "serverP";
-    String trustKS = keystoresDir + "/trustKS.jks";
+    String trustKS = null;
     String trustPassword = "trustP";
 
     File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
@@ -246,7 +266,10 @@ public class KeyStoreTestUtil {
                                     sKP.getPrivate(), sCert);
     certs.put("server", sCert);
 
-    KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
+    if (trustStore) {
+      trustKS = keystoresDir + "/trustKS.jks";
+      KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
+    }
 
     Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
       clientPassword, trustKS);

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java Sat Jun  7 16:29:10 2014
@@ -50,11 +50,12 @@ public class TestSSLFactory {
     base.mkdirs();
   }
 
-  private Configuration createConfiguration(boolean clientCert)
+  private Configuration createConfiguration(boolean clientCert,
+      boolean trustStore)
     throws Exception {
     Configuration conf = new Configuration();
     KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
-      clientCert);
+      clientCert, trustStore);
     return conf;
   }
 
@@ -67,7 +68,7 @@ public class TestSSLFactory {
 
   @Test(expected = IllegalStateException.class)
   public void clientMode() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
       sslFactory.init();
@@ -80,7 +81,7 @@ public class TestSSLFactory {
   }
 
   private void serverMode(boolean clientCert, boolean socket) throws Exception {
-    Configuration conf = createConfiguration(clientCert);
+    Configuration conf = createConfiguration(clientCert, true);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
     try {
       sslFactory.init();
@@ -119,7 +120,7 @@ public class TestSSLFactory {
 
   @Test
   public void validHostnameVerifier() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
     SSLFactory sslFactory = new
       SSLFactory(SSLFactory.Mode.CLIENT, conf);
@@ -157,7 +158,7 @@ public class TestSSLFactory {
 
   @Test(expected = GeneralSecurityException.class)
   public void invalidHostnameVerifier() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -169,7 +170,7 @@ public class TestSSLFactory {
 
   @Test
   public void testConnectionConfigurator() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -275,7 +276,7 @@ public class TestSSLFactory {
 
   @Test
   public void testNoClientCertsInitialization() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -285,4 +286,16 @@ public class TestSSLFactory {
     }
   }
 
+  @Test
+  public void testNoTrustStore() throws Exception {
+    Configuration conf = createConfiguration(false, false);
+    conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
+    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+    try {
+      sslFactory.init();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
+
 }

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java Sat Jun  7 16:29:10 2014
@@ -135,14 +135,11 @@ public class KMSCacheKeyProvider extends
 
   @Override
   public void deleteKey(String name) throws IOException {
-    Metadata metadata = provider.getMetadata(name);
-    List<String> versions = new ArrayList<String>(metadata.getVersions());
-    for (int i = 0; i < metadata.getVersions(); i++) {
-      versions.add(KeyProvider.buildVersionName(name, i));
-    }
     provider.deleteKey(name);
     currentKeyCache.invalidate(name);
-    keyVersionCache.invalidateAll(versions);
+    // invalidating all key versions as we don't know which ones belonged to the
+    // deleted key
+    keyVersionCache.invalidateAll();
   }
 
   @Override
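
The removed loop tried to enumerate the deleted key's version names; since the
provider cannot do that reliably, the patch falls back to clearing the whole
cache. A self-contained sketch of that trade-off with a plain Guava cache (the
cache construction below is an assumption, not the class's actual field setup):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class InvalidateAllSketch {
      public static void main(String[] args) {
        Cache<String, String> keyVersionCache =
            CacheBuilder.newBuilder().maximumSize(1000).build();
        keyVersionCache.put("k@0", "material-0");
        keyVersionCache.put("other@0", "material-1");
        // Targeted invalidation needs exact version names, which cannot
        // be reliably enumerated once the key is deleted ...
        keyVersionCache.invalidate("k@0");
        // ... so the patch uses the coarser, always-correct form instead:
        keyVersionCache.invalidateAll();
      }
    }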

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java Sat Jun  7 16:29:10 2014
@@ -577,7 +577,9 @@ public class TestKMS {
               Assert.fail(ex.toString());
             }
             try {
-              kp.getKeyVersion(KMSClientProvider.buildVersionName("k", 0));
+              // we are using JavaKeyStoreProvider for testing, so we know how
+              // the keyversion is created.
+              kp.getKeyVersion("k@0");
               Assert.fail();
             } catch (AuthorizationException ex) {
               //NOP
@@ -717,6 +719,9 @@ public class TestKMS {
           }
         });
 
+        // stop the reloader, to avoid it running while we are writing the new file
+        KMSWebApp.getACLs().stopReloader();
+
         // test ACL reloading
         Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
         conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
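
KMSACLs' reloader internals are not part of this diff; the following is only a
generic sketch, with all names assumed, of the stop-before-rewrite pattern that
the new stopReloader() call relies on:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class FileReloaderSketch {
      private ScheduledExecutorService executor;

      public void startReloader() {
        executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleAtFixedRate(new Runnable() {
          @Override
          public void run() {
            reloadFile(); // re-read the ACL file in the background
          }
        }, 100, 100, TimeUnit.MILLISECONDS);
      }

      public void stopReloader() {
        // Shut the executor down before a test rewrites the file, so no
        // reload can observe a half-written ACL file.
        executor.shutdownNow();
      }

      private void reloadFile() {
        // parse the ACL file and swap in the new ACLs
      }
    }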

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java Sat Jun  7 16:29:10 2014
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.net.util.SubnetUtils;
 import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.util.LightWeightCache;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -44,13 +45,14 @@ public class NfsExports {
   
   public static synchronized NfsExports getInstance(Configuration conf) {
     if (exports == null) {
-      String matchHosts = conf.get(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
-          Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
-      int cacheSize = conf.getInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY,
-          Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT);
+      String matchHosts = conf.get(
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
+      int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
+          Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
       long expirationPeriodNano = conf.getLong(
-          Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
-          Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
+          Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
+          Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
       exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
     }
     return exports;
@@ -140,7 +142,7 @@ public class NfsExports {
     accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
         cacheSize, cacheSize, expirationPeriodNano, 0);        
     String[] matchStrings = matchHosts.split(
-        Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR);
+        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR);
     mMatches = new ArrayList<Match>(matchStrings.length);
     for(String mStr : matchStrings) {
       if (LOG.isDebugEnabled()) {
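
Note the expiry key stays in milliseconds while LightWeightCache wants
nanoseconds, hence the "* 1000 * 1000" above. A tiny sketch of the same
conversion spelled with TimeUnit:

    import java.util.concurrent.TimeUnit;

    public class ExpiryConversionSketch {
      public static void main(String[] args) {
        // 15 minutes, matching NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT.
        long expiryMillis = 15 * 60 * 1000;
        // Equivalent to the patch's "millis * 1000 * 1000", but harder
        // to get wrong:
        long expirationPeriodNano = TimeUnit.MILLISECONDS.toNanos(expiryMillis);
        System.out.println(expirationPeriodNano); // prints 900000000000
      }
    }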

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Sat Jun  7 16:29:10 2014
@@ -50,9 +50,6 @@ public class IdUserGroup {
   static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
   static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
 
-  // Used for finding the configured static mapping file.
-  static final String NFS_STATIC_MAPPING_FILE_KEY = "dfs.nfs.static.mapping.file";
-  private static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
   private final File staticMappingFile;
 
   // Used for parsing the static mapping file.
@@ -61,11 +58,7 @@ public class IdUserGroup {
   private static final Pattern MAPPING_LINE =
       Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");
 
-  // Do update every 15 minutes by default
-  final static long TIMEOUT_DEFAULT = 15 * 60 * 1000; // ms
-  final static long TIMEOUT_MIN = 1 * 60 * 1000; // ms
   final private long timeout;
-  final static String NFS_USERUPDATE_MILLY = "hadoop.nfs.userupdate.milly";
   
   // Maps for id to name map. Guarded by this object monitor lock
   private BiMap<Integer, String> uidNameMap = HashBiMap.create();
@@ -73,25 +66,21 @@ public class IdUserGroup {
 
   private long lastUpdateTime = 0; // Last time maps were updated
   
-  public IdUserGroup() throws IOException {
-    timeout = TIMEOUT_DEFAULT;
-    staticMappingFile = new File(NFS_STATIC_MAPPING_FILE_DEFAULT);
-    updateMaps();
-  }
-  
   public IdUserGroup(Configuration conf) throws IOException {
-    long updateTime = conf.getLong(NFS_USERUPDATE_MILLY, TIMEOUT_DEFAULT);
+    long updateTime = conf.getLong(
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
     // Minimal interval is 1 minute
-    if (updateTime < TIMEOUT_MIN) {
+    if (updateTime < Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN) {
       LOG.info("User configured user account update time is less"
           + " than 1 minute. Use 1 minute instead.");
-      timeout = TIMEOUT_MIN;
+      timeout = Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN;
     } else {
       timeout = updateTime;
     }
     
-    String staticFilePath = conf.get(NFS_STATIC_MAPPING_FILE_KEY,
-        NFS_STATIC_MAPPING_FILE_DEFAULT);
+    String staticFilePath = conf.get(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY,
+        Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT);
     staticMappingFile = new File(staticFilePath);
     
     updateMaps();
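
With the no-argument constructor gone, every caller now supplies a
Configuration. A minimal, hypothetical construction showing the one-minute
clamp documented above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.nfs.nfs3.IdUserGroup;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    public class IdUserGroupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask for a 30-second refresh; the constructor clamps anything
        // under one minute up to NFS_USERGROUP_UPDATE_MILLIS_MIN.
        conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 30 * 1000);
        IdUserGroup iug = new IdUserGroup(conf); // reads system users/groups
        System.out.println("effective timeout: " + iug.getTimeout() + " ms");
      }
    }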

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java Sat Jun  7 16:29:10 2014
@@ -33,7 +33,6 @@ import org.apache.hadoop.util.ShutdownHo
 public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
-  private final int nfsPort;
   private int nfsBoundPort; // Will set after server starts
     
   public RpcProgram getRpcProgram() {
@@ -42,9 +41,7 @@ public abstract class Nfs3Base {
 
   protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
     this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
-    LOG.info("NFS server port set to: " + nfsPort);
+    LOG.info("NFS server port set to: " + rpcProgram.getPort());
   }
 
   public void start(boolean register) {
@@ -58,7 +55,7 @@ public abstract class Nfs3Base {
   }
 
   private void startTCPServer() {
-    SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
+    SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
         rpcProgram, 0);
     rpcProgram.startDaemons();
     tcpServer.run();

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java Sat Jun  7 16:29:10 2014
@@ -25,10 +25,6 @@ public class Nfs3Constant {
   // The local rpcbind/portmapper port.
   public final static int SUN_RPCBIND = 111;
 
-  // The IP port number for NFS.
-  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
-  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
-
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
 
@@ -191,36 +187,22 @@ public class Nfs3Constant {
   public final static int CREATE_GUARDED = 1;
   public final static int CREATE_EXCLUSIVE = 2;
   
-  public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
-  /** Allowed hosts for nfs exports */
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts";
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   /** Size for nfs exports cache */
-  public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size";
-  public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
+  public static final String NFS_EXPORTS_CACHE_SIZE_KEY = "nfs.exports.cache.size";
+  public static final int NFS_EXPORTS_CACHE_SIZE_DEFAULT = 512;
   /** Expiration time for nfs exports cache entry */
-  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis";
-  public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
+  public static final String NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "nfs.exports.cache.expirytime.millis";
+  public static final long NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
 
-  public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir";
-  public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs";
-  public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump";
-  public static final boolean ENABLE_FILE_DUMP_DEFAULT = true;
-  public static final String MAX_READ_TRANSFER_SIZE_KEY = "dfs.nfs.rtmax";
-  public static final int MAX_READ_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_WRITE_TRANSFER_SIZE_KEY = "dfs.nfs.wtmax";
-  public static final int MAX_WRITE_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_READDIR_TRANSFER_SIZE_KEY = "dfs.nfs.dtmax";
-  public static final int MAX_READDIR_TRANSFER_SIZE_DEFAULT = 64 * 1024;
-  public static final String MAX_OPEN_FILES = "dfs.nfs3.max.open.files";
-  public static final int MAX_OPEN_FILES_DEFAULT = 256;
-  public static final String OUTPUT_STREAM_TIMEOUT = "dfs.nfs3.stream.timeout";
-  public static final long OUTPUT_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes
-  public static final long OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; //10 seconds
+  /** Do user/group update every 15 minutes by default, minimum 1 minute */
+  public final static String NFS_USERGROUP_UPDATE_MILLIS_KEY = "nfs.usergroup.update.millis";
+  public final static long NFS_USERGROUP_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
+  final static long NFS_USERGROUP_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
   
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
   
-  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
-  public final static String EXPORT_POINT_DEFAULT = "/";
+  // Used for finding the configured static mapping file.
+  public static final String NFS_STATIC_MAPPING_FILE_KEY = "nfs.static.mapping.file";
+  public static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
 }
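
Summarizing the renames for anyone updating configs: the dfs.nfs.* and
dfs.nfs3.* names in the removed lines become nfs.* names. A hypothetical
snippet setting two of the keys visible in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    public class NfsKeySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "nfs.exports.cache.size" (was "dfs.nfs.exports.cache.size"):
        conf.setInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY, 1024);
        // "nfs.static.mapping.file" (was a package-private
        // "dfs.nfs.static.mapping.file" key on IdUserGroup):
        conf.set(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY, "/etc/nfs.map");
      }
    }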

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java Sat Jun  7 16:29:10 2014
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.nfs;
 
-import org.junit.Assert;
-
-import org.apache.hadoop.nfs.AccessPrivilege;
-import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestNfsExports {
@@ -32,9 +29,9 @@ public class TestNfsExports {
   private final String hostname2 = "a.b.org";
   
   private static final long ExpirationPeriod = 
-      Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
+      Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
   
-  private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT;
+  private static final int CacheSize = Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT;
   private static final long NanosPerMillis = 1000000;
 
   @Test

Modified: hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java?rev=1601151&r1=1601150&r2=1601151&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java Sat Jun  7 16:29:10 2014
@@ -199,17 +199,19 @@ public class TestIdUserGroup {
 
   @Test
   public void testUserUpdateSetting() throws IOException {
-    IdUserGroup iug = new IdUserGroup();
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT);
+    IdUserGroup iug = new IdUserGroup(new Configuration());
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
 
     Configuration conf = new Configuration();
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY, 0);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_MIN);
+    assertEquals(iug.getTimeout(), Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN);
 
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY,
-        IdUserGroup.TIMEOUT_DEFAULT * 2);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT * 2);
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
   }
 }


