hadoop-common-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1462698 - in /hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common: ./ src/main/conf/ src/main/docs/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/fs/s3/ src/main/java/org/apache/hadoop/...
Date Sat, 30 Mar 2013 03:50:08 GMT
Author: szetszwo
Date: Sat Mar 30 03:50:03 2013
New Revision: 1462698

URL: http://svn.apache.org/r1462698
Log:
Merge r1460409 through r1462697 from trunk.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/   (props changed)
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
    hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt Sat Mar 30 03:50:03 2013
@@ -17,6 +17,8 @@ Trunk (Unreleased)
 
     HADOOP-9380 Add totalLength to rpc response  (sanjay Radia)
 
+    HADOOP-9194. RPC Support for QoS. (Junping Du via llu)
+
   NEW FEATURES
     
     HADOOP-8561. Introduce HADOOP_PROXY_USER for secure impersonation in child
@@ -90,9 +92,6 @@ Trunk (Unreleased)
     HADOOP-8367 Improve documentation of declaringClassProtocolName in 
     rpc headers. (Sanjay Radia)
 
-    HADOOP-8415. Add getDouble() and setDouble() in
-    org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
-
     HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
     filesystems (harsh)
 
@@ -162,9 +161,7 @@ Trunk (Unreleased)
 
     HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)
 
-    HADOOP-9112. test-patch should -1 for @Tests without a timeout 
-    (Surenkumar Nihalani via bobby)
-
+    HADOOP-9258 Add stricter tests to FileSystemContractTestBase (stevel)
 
   BUG FIXES
 
@@ -358,6 +355,15 @@ Trunk (Unreleased)
     HADOOP-9431 TestSecurityUtil#testLocalHostNameForNullOrWild on systems where hostname
     contains capital letters  (Chris Nauroth via sanjay)
 
+    HADOOP-9261 S3n filesystem can move a directory under itself -and so lose data
+    (fixed in HADOOP-9258) (stevel)
+
+    HADOOP-9265 S3 blockstore filesystem breaks part of the Filesystem contract
+    (fixed in HADOOP-9258) (stevel)
+
+    HADOOP-9433 TestLocalFileSystem#testHasFileDescriptor leaks file handle
+    (Chris Nauroth via sanjay)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -505,6 +511,9 @@ Release 2.0.5-beta - UNRELEASED
 
     HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
 
+    HADOOP-8415. Add getDouble() and setDouble() in
+    org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
+
   IMPROVEMENTS
 
     HADOOP-9253. Capture ulimit info in the logs at service start time.
@@ -525,8 +534,13 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9318. When exiting on a signal, print the signal name first. (Colin
     Patrick McCabe via atm)
 
+    HADOOP-9358. "Auth failed" log should include exception string (todd)
+
   OPTIMIZATIONS
 
+    HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs
+    (todd)
+
   BUG FIXES
 
     HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
@@ -585,6 +599,14 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9299.  kerberos name resolution is kicking in even when kerberos
     is not configured (daryn)
 
+    HADOOP-9430. TestSSLFactory fails on IBM JVM. (Amir Sanjar via suresh)
+
+    HADOOP-9125. LdapGroupsMapping threw CommunicationException after some
+    idle time. (Kai Zheng via atm)
+
+    HADOOP-9357. Fallback to default authority if not specified in FileContext.
+    (Andrew Wang via eli)
+
 Release 2.0.4-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -605,6 +627,9 @@ Release 2.0.4-alpha - UNRELEASED
     HADOOP-9408. misleading description for net.topology.table.file.name
     property in core-default.xml. (rajeshbabu via suresh)
 
+    HADOOP-9444. Modify hadoop-policy.xml to replace unexpanded variables to a
+    default value of '*'. (Roman Shaposhnik via vinodkv)
+
 Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1460409-1462697

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml Sat Mar 30 03:50:03 2013
@@ -77,7 +77,7 @@
 
  <property>
     <name>security.admin.operations.protocol.acl</name>
-    <value>${HADOOP_HDFS_USER}</value>
+    <value>*</value>
     <description>ACL for AdminOperationsProtocol. Used for admin commands.
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
@@ -86,7 +86,7 @@
 
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>${HADOOP_HDFS_USER}</value>
+    <value>*</value>
     <description>ACL for RefreshUserMappingsProtocol. Used to refresh
     users mappings. The ACL is a comma-separated list of user and
     group names. The user and group list is separated by a blank. For
@@ -96,7 +96,7 @@
 
   <property>
     <name>security.refresh.policy.protocol.acl</name>
-    <value>${HADOOP_HDFS_USER}</value>
+    <value>*</value>
     <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
     dfsadmin and mradmin commands to refresh the security policy in-effect.
     The ACL is a comma-separated list of user and group names. The user and
@@ -120,7 +120,7 @@
 
   <property>
     <name>security.qjournal.service.protocol.acl</name>
-    <value>${HADOOP_HDFS_USER}</value>
+    <value>*</value>
     <description>ACL for QJournalProtocol, used by the NN to communicate with
     JNs when using the QuorumJournalManager for edit logs.</description>
   </property>
@@ -139,7 +139,7 @@
 
   <property>
     <name>security.resourcetracker.protocol.acl</name>
-    <value>${HADOOP_YARN_USER}</value>
+    <value>*</value>
     <description>ACL for ResourceTracker protocol, used by the
     ResourceManager and NodeManager to communicate with each other.
     The ACL is a comma-separated list of user and group names. The user and
@@ -149,7 +149,7 @@
 
   <property>
     <name>security.admin.protocol.acl</name>
-    <value>${HADOOP_YARN_USER}</value>
+    <value>*</value>
     <description>ACL for RMAdminProtocol, for admin commands. 
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1460409-1462697

Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1460409-1462697

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java Sat Mar 30 03:50:03 2013
@@ -244,17 +244,33 @@ public final class FileContext {
   }
  
   /* 
-   * Remove relative part - return "absolute":
-   * If input is relative path ("foo/bar") add wd: ie "/<workingDir>/foo/bar"
-   * A fully qualified uri ("hdfs://nn:p/foo/bar") or a slash-relative path
+   * Resolve a relative path passed from the user.
+   * 
+   * Relative paths are resolved against the current working directory
+   * (e.g. "foo/bar" becomes "/<workingDir>/foo/bar").
+   * Fully-qualified URIs (e.g. "hdfs://nn:p/foo/bar") and slash-relative paths
    * ("/foo/bar") are returned unchanged.
    * 
+   * Additionally, we fix malformed URIs that specify a scheme but not an 
+   * authority (e.g. "hdfs:///foo/bar"). Per RFC 2396, we remove the scheme
+   * if it matches the default FS, and let the default FS add in the default
+   * scheme and authority later (see {@link AbstractFileSystem#checkPath}).
+   * 
    * Applications that use FileContext should use #makeQualified() since
-   * they really want a fully qualified URI.
+   * they really want a fully-qualified URI.
    * Hence this method is not called makeAbsolute() and 
    * has been deliberately declared private.
    */
   private Path fixRelativePart(Path p) {
+    // Per RFC 2396 5.2, drop the scheme if there is a scheme but no authority.
+    if (p.hasSchemeAndNoAuthority()) {
+      String scheme = p.toUri().getScheme();
+      if (scheme.equalsIgnoreCase(defaultFS.getUri().getScheme())) {
+        p = new Path(p.toUri().getSchemeSpecificPart());
+      }
+    }
+    // Absolute paths are unchanged. Relative paths are resolved against the
+    // current working directory.
     if (p.isUriPathAbsolute()) {
       return p;
     } else {

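For illustration, the effect of the new scheme check can be seen from outside the class; the following is a minimal hypothetical sketch (class name and the hdfs://nn:8020 default FS are assumptions), not part of this commit:

    import org.apache.hadoop.fs.Path;

    public class FixRelativePartSketch {
      public static void main(String[] args) {
        // "hdfs:///foo/bar" carries a scheme but no authority; when the scheme
        // matches the default FS, fixRelativePart() strips the scheme and lets
        // the default FS re-qualify the result (e.g. to hdfs://nn:8020/foo/bar).
        Path p = new Path("hdfs:///foo/bar");
        System.out.println(p.hasSchemeAndNoAuthority());  // true
        System.out.println(p.toUri().getScheme());        // hdfs
        // Fully-qualified URIs ("hdfs://nn:8020/foo/bar") and slash-relative
        // paths ("/foo/bar") pass through unchanged.
      }
    }
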
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java Sat Mar 30 03:50:03 2013
@@ -21,6 +21,7 @@ import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -211,12 +212,46 @@ public abstract class FileSystem extends
   public abstract URI getUri();
   
   /**
-   * Resolve the uri's hostname and add the default port if not in the uri
+   * Return a canonicalized form of this FileSystem's URI.
+   * 
+   * The default implementation simply calls {@link #canonicalizeUri(URI)}
+   * on the filesystem's own URI, so subclasses typically only need to
+   * implement that method.
+   *
+   * @see #canonicalizeUri(URI)
+   */
+  protected URI getCanonicalUri() {
+    return canonicalizeUri(getUri());
+  }
+  
+  /**
+   * Canonicalize the given URI.
+   * 
+   * This is filesystem-dependent, but may for example consist of
+   * canonicalizing the hostname using DNS and adding the default
+   * port if not specified.
+   * 
+   * The default implementation simply fills in the default port if
+   * not specified and if the filesystem has a default port.
+   *
    * @return URI
    * @see NetUtils#getCanonicalUri(URI, int)
    */
-  protected URI getCanonicalUri() {
-    return NetUtils.getCanonicalUri(getUri(), getDefaultPort());
+  protected URI canonicalizeUri(URI uri) {
+    if (uri.getPort() == -1 && getDefaultPort() > 0) {
+      // reconstruct the uri with the default port set
+      try {
+        uri = new URI(uri.getScheme(), uri.getUserInfo(),
+            uri.getHost(), getDefaultPort(),
+            uri.getPath(), uri.getQuery(), uri.getFragment());
+      } catch (URISyntaxException e) {
+        // Should never happen!
+        throw new AssertionError("Valid URI became unparseable: " +
+            uri);
+      }
+    }
+    
+    return uri;
   }
   
   /**
@@ -581,7 +616,7 @@ public abstract class FileSystem extends
       }
       if (uri != null) {
         // canonicalize uri before comparing with this fs
-        uri = NetUtils.getCanonicalUri(uri, getDefaultPort());
+        uri = canonicalizeUri(uri);
         thatAuthority = uri.getAuthority();
         if (thisAuthority == thatAuthority ||       // authorities match
             (thisAuthority != null &&

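The default-port logic of the new canonicalizeUri() can be reproduced in isolation; a minimal sketch, assuming a default port of 8020 (not code from this commit):

    import java.net.URI;

    public class CanonicalizeUriSketch {
      public static void main(String[] args) throws Exception {
        URI uri = new URI("hdfs://namenode/path");  // no explicit port
        int defaultPort = 8020;                     // assumed default port
        if (uri.getPort() == -1 && defaultPort > 0) {
          // rebuild the URI with the default port filled in
          uri = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
              defaultPort, uri.getPath(), uri.getQuery(), uri.getFragment());
        }
        System.out.println(uri);  // hdfs://namenode:8020/path
      }
    }
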
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java Sat Mar 30 03:50:03 2013
@@ -95,16 +95,18 @@ public class FilterFileSystem extends Fi
   public URI getUri() {
     return fs.getUri();
   }
-
-  /**
-   * Returns a qualified URI whose scheme and authority identify this
-   * FileSystem.
-   */
+  
+  
   @Override
   protected URI getCanonicalUri() {
     return fs.getCanonicalUri();
   }
-  
+
+  @Override
+  protected URI canonicalizeUri(URI uri) {
+    return fs.canonicalizeUri(uri);
+  }
+
   /** Make sure that a path specifies a FileSystem. */
   @Override
   public Path makeQualified(Path path) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java Sat Mar 30 03:50:03 2013
@@ -256,6 +256,10 @@ public class Path implements Comparable 
     return  (isUriPathAbsolute() && 
         uri.getScheme() == null && uri.getAuthority() == null);
   }
+
+  public boolean hasSchemeAndNoAuthority() {
+    return uri.getScheme() != null && uri.getAuthority() == null;
+  }
   
   /**
    *  True if the path component (i.e. directory) of this URI is absolute.

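A quick illustration of the new predicate (paths are hypothetical):

    import org.apache.hadoop.fs.Path;

    public class HasSchemeAndNoAuthorityDemo {
      public static void main(String[] args) {
        System.out.println(
            new Path("hdfs:///foo").hasSchemeAndNoAuthority());        // true
        System.out.println(
            new Path("hdfs://nn:8020/foo").hasSchemeAndNoAuthority()); // false: has authority
        System.out.println(
            new Path("/foo").hasSchemeAndNoAuthority());               // false: no scheme
      }
    }
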
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java Sat Mar 30 03:50:03 2013
@@ -137,9 +137,15 @@ class Jets3tFileSystemStore implements F
 
   @Override
   public boolean inodeExists(Path path) throws IOException {
-    InputStream in = get(pathToKey(path), true);
+    String key = pathToKey(path);
+    InputStream in = get(key, true);
     if (in == null) {
-      return false;
+      if (isRoot(key)) {
+        storeINode(path, INode.DIRECTORY_INODE);
+        return true;
+      } else {
+        return false;
+      }
     }
     in.close();
     return true;
@@ -211,7 +217,13 @@ class Jets3tFileSystemStore implements F
 
   @Override
   public INode retrieveINode(Path path) throws IOException {
-    return INode.deserialize(get(pathToKey(path), true));
+    String key = pathToKey(path);
+    InputStream in = get(key, true);
+    if (in == null && isRoot(key)) {
+      storeINode(path, INode.DIRECTORY_INODE);
+      return INode.DIRECTORY_INODE;
+    }
+    return INode.deserialize(in);
   }
 
   @Override
@@ -366,6 +378,10 @@ class Jets3tFileSystemStore implements F
     return blockToKey(block.getId());
   }
 
+  private boolean isRoot(String key) {
+    return key.isEmpty() || key.equals("/");
+  }
+
   @Override
   public void purge() throws IOException {
     try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java Sat Mar 30 03:50:03 2013
@@ -252,32 +252,125 @@ public class S3FileSystem extends FileSy
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     Path absoluteSrc = makeAbsolute(src);
+    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
     INode srcINode = store.retrieveINode(absoluteSrc);
+    boolean debugEnabled = LOG.isDebugEnabled();
     if (srcINode == null) {
       // src path doesn't exist
+      if (debugEnabled) {
+        LOG.debug(debugPreamble + "returning false as src does not exist");
+      }
       return false; 
     }
+
     Path absoluteDst = makeAbsolute(dst);
-    INode dstINode = store.retrieveINode(absoluteDst);
-    if (dstINode != null && dstINode.isDirectory()) {
-      absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
-      dstINode = store.retrieveINode(absoluteDst);
-    }
-    if (dstINode != null) {
-      // dst path already exists - can't overwrite
-      return false;
-    }
+
+    //validate the parent dir of the destination
     Path dstParent = absoluteDst.getParent();
     if (dstParent != null) {
+      //if the dst parent is not root, make sure it exists
       INode dstParentINode = store.retrieveINode(dstParent);
-      if (dstParentINode == null || dstParentINode.isFile()) {
-        // dst parent doesn't exist or is a file
+      if (dstParentINode == null) {
+        // dst parent doesn't exist
+        if (debugEnabled) {
+          LOG.debug(debugPreamble +
+                    "returning false as dst parent does not exist");
+        }
+        return false;
+      }
+      if (dstParentINode.isFile()) {
+        // dst parent exists but is a file
+        if (debugEnabled) {
+          LOG.debug(debugPreamble +
+                    "returning false as dst parent exists and is a file");
+        }
+        return false;
+      }
+    }
+
+    //get status of source
+    boolean srcIsFile = srcINode.isFile();
+
+    INode dstINode = store.retrieveINode(absoluteDst);
+    boolean destExists = dstINode != null;
+    boolean destIsDir = destExists && !dstINode.isFile();
+    if (srcIsFile) {
+
+      //source is a simple file
+      if (destExists) {
+        if (destIsDir) {
+          //outcome #1 dest exists and is dir - file moves to subdir of dest
+          if (debugEnabled) {
+            LOG.debug(debugPreamble +
+                      "copying src file under dest dir to " + absoluteDst);
+          }
+          absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
+        } else {
+          //outcome #2 dest is a file: fail iff different from src
+          boolean renamingOnToSelf = absoluteSrc.equals(absoluteDst);
+          if (debugEnabled) {
+            LOG.debug(debugPreamble +
+                      "copying file onto file, outcome is " + renamingOnToSelf);
+          }
+          return renamingOnToSelf;
+        }
+      } else {
+        // #3 dest does not exist: use dest as path for rename
+        if (debugEnabled) {
+          LOG.debug(debugPreamble +
+                    "copying file onto file");
+        }
+      }
+    } else {
+      //here the source exists and is a directory
+      // outcomes (given we know the parent dir exists if we get this far)
+      // #1 destination is a file: fail
+      // #2 destination is a directory: create a new dir under that one
+      // #3 destination doesn't exist: create a new dir with that name
+      // #2 and #3 are only allowed if the dest path is not == or under src
+
+      if (destExists) {
+        if (!destIsDir) {
+          // #1 destination is a file: fail
+          if (debugEnabled) {
+            LOG.debug(debugPreamble +
+                      "returning false as src is a directory, but not dest");
+          }
+          return false;
+        } else {
+          // the destination dir exists
+          // destination for rename becomes a subdir of the target name
+          absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
+          if (debugEnabled) {
+            LOG.debug(debugPreamble +
+                      "copying src dir under dest dir to " + absoluteDst);
+          }
+        }
+      }
+      //the final destination directory is now known, so validate it for
+      //illegal moves
+
+      if (absoluteSrc.equals(absoluteDst)) {
+        //you can't rename a directory onto itself
+        if (debugEnabled) {
+          LOG.debug(debugPreamble +
+                    "Dest==source && isDir -failing");
+        }
+        return false;
+      }
+      if (absoluteDst.toString().startsWith(absoluteSrc.toString() + "/")) {
+        //you can't move a directory under itself
+        if (debugEnabled) {
+          LOG.debug(debugPreamble +
+                    "dst is equal to or under src dir -failing");
+        }
         return false;
       }
     }
+    //here the dest path is set up - so rename
     return renameRecursive(absoluteSrc, absoluteDst);
   }
-  
+
   private boolean renameRecursive(Path src, Path dst) throws IOException {
     INode srcINode = store.retrieveINode(src);
     store.storeINode(dst, srcINode);

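Taken together, the new checks give S3FileSystem.rename() the observable outcomes below; the sketch is illustrative only, assuming fs is an initialized S3FileSystem, /dir an existing directory, and /file an existing file:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class RenameOutcomesSketch {
      static void show(FileSystem fs) throws Exception {
        fs.rename(new Path("/dir"), new Path("/dir"));         // dir onto itself    -> false
        fs.rename(new Path("/dir"), new Path("/dir/sub"));     // dir under itself   -> false
        fs.rename(new Path("/file"), new Path("/file"));       // file onto itself   -> true
        fs.rename(new Path("/file"), new Path("/no/parent"));  // missing dst parent -> false
      }
    }
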
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java Sat Mar 30 03:50:03 2013
@@ -582,35 +582,58 @@ public class NativeS3FileSystem extends 
   public boolean rename(Path src, Path dst) throws IOException {
 
     String srcKey = pathToKey(makeAbsolute(src));
+    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
 
     if (srcKey.length() == 0) {
       // Cannot rename root of file system
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(debugPreamble +
+                  "returning false as cannot rename the root of a filesystem");
+      }
       return false;
     }
 
-    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
-
+    //get status of source
+    boolean srcIsFile;
+    try {
+      srcIsFile = getFileStatus(src).isFile();
+    } catch (FileNotFoundException e) {
+      //bail out fast if the source does not exist
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(debugPreamble + "returning false as src does not exist");
+      }
+      return false;
+    }
     // Figure out the final destination
-    String dstKey;
+    String dstKey = pathToKey(makeAbsolute(dst));
+
     try {
       boolean dstIsFile = getFileStatus(dst).isFile();
       if (dstIsFile) {
+        //destination is a file.
+        //you can't copy a file or a directory onto an existing file
+        //except for the special case of dest==src, which is a no-op
         if(LOG.isDebugEnabled()) {
           LOG.debug(debugPreamble +
-              "returning false as dst is an already existing file");
+              "returning without rename as dst is an already existing file");
         }
-        return false;
+        //exit, returning true iff the rename is onto self
+        return srcKey.equals(dstKey);
       } else {
+        //destination exists and is a directory
         if(LOG.isDebugEnabled()) {
           LOG.debug(debugPreamble + "using dst as output directory");
         }
+        //destination goes under the dst path, with the name of the
+        //source entry
         dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
       }
     } catch (FileNotFoundException e) {
+      //destination does not exist => the source file or directory
+      //is copied over with the name of the destination
       if(LOG.isDebugEnabled()) {
         LOG.debug(debugPreamble + "using dst as output destination");
       }
-      dstKey = pathToKey(makeAbsolute(dst));
       try {
         if (getFileStatus(dst.getParent()).isFile()) {
           if(LOG.isDebugEnabled()) {
@@ -628,16 +651,17 @@ public class NativeS3FileSystem extends 
       }
     }
 
-    boolean srcIsFile;
-    try {
-      srcIsFile = getFileStatus(src).isFile();
-    } catch (FileNotFoundException e) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug(debugPreamble + "returning false as src does not exist");
+    //rename to self behavior follows POSIX rules and is different
+    //for directories and files - the return code is driven by src type
+    if (srcKey.equals(dstKey)) {
+      //fully resolved destination key matches source: rename is a no-op
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(debugPreamble + "renamingToSelf; returning true");
       }
-      return false;
+      return true;
     }
     if (srcIsFile) {
+      //source is a file; COPY then DELETE
       if(LOG.isDebugEnabled()) {
         LOG.debug(debugPreamble +
             "src is file, so doing copy then delete in S3");
@@ -645,9 +669,19 @@ public class NativeS3FileSystem extends 
       store.copy(srcKey, dstKey);
       store.delete(srcKey);
     } else {
+      //src is a directory
       if(LOG.isDebugEnabled()) {
         LOG.debug(debugPreamble + "src is directory, so copying contents");
       }
+      //Verify dest is not a child of the src directory
+      if (dstKey.startsWith(srcKey + "/")) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(
+            debugPreamble + "cannot rename a directory to a subdirectory of self");
+        }
+        return false;
+      }
+      //create the subdir under the destination
       store.storeEmptyFile(dstKey + FOLDER_SUFFIX);
 
       List<String> keysToDelete = new ArrayList<String>();

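The "under self" guard compares fully-resolved keys, and the trailing slash in the prefix matters: it stops a source such as dir from spuriously matching a sibling like dirX. A standalone sketch with assumed keys:

    public class UnderSelfCheckSketch {
      public static void main(String[] args) {
        String srcKey = "user/home/dir";  // assumed source key
        // true: dst is under src, so the rename is refused
        System.out.println("user/home/dir/sub".startsWith(srcKey + "/"));
        // false: a sibling whose name merely shares the prefix is allowed
        System.out.println("user/home/dirX/f".startsWith(srcKey + "/"));
      }
    }
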
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java Sat Mar 30 03:50:03 2013
@@ -257,6 +257,7 @@ public class Client {
     private final ConnectionId remoteId;                // connection id
     private AuthMethod authMethod; // authentication method
     private Token<? extends TokenIdentifier> token;
+    private int serviceClass;
     private SaslRpcClient saslRpcClient;
     
     private Socket socket = null;                 // connected socket
@@ -279,7 +280,7 @@ public class Client {
     
     private final Object sendRpcRequestLock = new Object();
 
-    public Connection(ConnectionId remoteId) throws IOException {
+    public Connection(ConnectionId remoteId, int serviceClass) throws IOException {
       this.remoteId = remoteId;
       this.server = remoteId.getAddress();
       if (server.isUnresolved()) {
@@ -296,6 +297,7 @@ public class Client {
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
       this.pingInterval = remoteId.getPingInterval();
+      this.serviceClass = serviceClass;
       if (LOG.isDebugEnabled()) {
         LOG.debug("The ping interval is " + this.pingInterval + " ms.");
       }
@@ -747,7 +749,9 @@ public class Client {
      * +----------------------------------+
      * |  "hrpc" 4 bytes                  |      
      * +----------------------------------+
-     * |  Version (1 bytes)               |      
+     * |  Version (1 byte)                |
+     * +----------------------------------+
+     * |  Service Class (1 byte)          |
      * +----------------------------------+
      * |  Authmethod (1 byte)             |      
      * +----------------------------------+
@@ -760,6 +764,7 @@ public class Client {
       // Write out the header, version and authentication method
       out.write(Server.HEADER.array());
       out.write(Server.CURRENT_VERSION);
+      out.write(serviceClass);
       authMethod.write(out);
       Server.IpcSerializationType.PROTOBUF.write(out);
       out.flush();
@@ -1179,20 +1184,34 @@ public class Client {
 
   
   /**
-   * Same as {@link #call(RPC.RpcKind, Writable, InetSocketAddress, 
+   * Same as {@link #call(RPC.RpcKind, Writable, InetSocketAddress,
    * Class, UserGroupInformation, int, Configuration)}
    * except that rpcKind is writable.
    */
-  public Writable call(Writable param, InetSocketAddress addr, 
+  public Writable call(Writable param, InetSocketAddress addr,
       Class<?> protocol, UserGroupInformation ticket,
-      int rpcTimeout, Configuration conf)  
+      int rpcTimeout, Configuration conf)
       throws InterruptedException, IOException {
-        ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
+    ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
   }
   
   /**
+   * Same as {@link #call(Writable, InetSocketAddress,
+   * Class, UserGroupInformation, int, Configuration)}
+   * except that a serviceClass is specified.
+   */
+  public Writable call(Writable param, InetSocketAddress addr,
+      Class<?> protocol, UserGroupInformation ticket,
+      int rpcTimeout, int serviceClass, Configuration conf)
+      throws InterruptedException, IOException {
+    ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
+        ticket, rpcTimeout, conf);
+    return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass);
+  }
+
+  /**
    * Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code> which is servicing the <code>protocol</code> protocol,
    * with the <code>ticket</code> credentials, <code>rpcTimeout</code> as
@@ -1218,6 +1237,22 @@ public class Client {
      return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
   }
   
+  /**
+   * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
+   * <code>remoteId</code>, returning the rpc response.
+   *
+   * @param rpcKind
+   * @param rpcRequest -  contains serialized method and method parameters
+   * @param remoteId - the target rpc server
+   * @return the rpc response
+   * Throws exceptions if there are network problems or if the remote code 
+   * threw an exception.
+   */
+  public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
+      ConnectionId remoteId) throws InterruptedException, IOException {
+    return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT);
+  }
+
   /** 
    * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
   * <code>remoteId</code>, returning the rpc response.
@@ -1225,14 +1260,16 @@ public class Client {
    * @param rpcKind
    * @param rpcRequest -  contains serialized method and method parameters
    * @param remoteId - the target rpc server
+   * @param serviceClass - service class for RPC
   * @return the rpc response
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception.
    */
   public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
-      ConnectionId remoteId) throws InterruptedException, IOException {
+      ConnectionId remoteId, int serviceClass)
+      throws InterruptedException, IOException {
     Call call = new Call(rpcKind, rpcRequest);
-    Connection connection = getConnection(remoteId, call);
+    Connection connection = getConnection(remoteId, call, serviceClass);
     try {
       connection.sendRpcRequest(call);                 // send the rpc request
     } catch (RejectedExecutionException e) {
@@ -1289,7 +1326,7 @@ public class Client {
   /** Get a connection from the pool, or create a new one and add it to the
    * pool.  Connections to a given ConnectionId are reused. */
   private Connection getConnection(ConnectionId remoteId,
-                                   Call call)
+                                   Call call, int serviceClass)
                                    throws IOException, InterruptedException {
     if (!running.get()) {
       // the client is stopped
@@ -1304,7 +1341,7 @@ public class Client {
       synchronized (connections) {
         connection = connections.get(remoteId);
         if (connection == null) {
-          connection = new Connection(remoteId);
+          connection = new Connection(remoteId, serviceClass);
           connections.put(remoteId, connection);
         }
       }

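A caller that wants QoS tagging would use the new overload roughly as follows; a hypothetical sketch (address, timeout, and service-class values are assumptions, and Object.class stands in for a real protocol class):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.ipc.Client;
    import org.apache.hadoop.security.UserGroupInformation;

    class ServiceClassCallSketch {
      static Writable callWithServiceClass(Client client, Writable param)
          throws Exception {
        return client.call(param,
            new InetSocketAddress("nn.example.com", 9000),  // assumed address
            Object.class,                                   // placeholder protocol
            UserGroupInformation.getCurrentUser(),
            0,   // rpcTimeout
            1,   // serviceClass, carried in the connection header
            new Configuration());
      }
    }
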
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java Sat Mar 30 03:50:03 2013
@@ -77,12 +77,12 @@ import com.google.protobuf.BlockingServi
 @InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", "Yarn" })
 @InterfaceStability.Evolving
 public class RPC {
+  final static int RPC_SERVICE_CLASS_DEFAULT = 0;
   public enum RpcKind {
     RPC_BUILTIN ((short) 1),         // Used for built in calls by tests
     RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine 
     RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
     final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
-    private static final short FIRST_INDEX = RPC_BUILTIN.value;    
     public final short value; //TODO make it private
 
     RpcKind(short val) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java Sat Mar 30 03:50:03 2013
@@ -438,6 +438,11 @@ public abstract class Server {
     return Arrays.asList(handlers);
   }
 
+  @VisibleForTesting
+  List<Connection> getConnections() {
+    return connectionList;
+  }
+
   /**
    * Refresh the service authorization ACL for the service handled by this server.
    */
@@ -1104,6 +1109,7 @@ public abstract class Server {
     private ByteBuffer connectionHeaderBuf = null;
     private ByteBuffer unwrappedData;
     private ByteBuffer unwrappedDataLengthBuffer;
+    private int serviceClass;
     
     UserGroupInformation user = null;
     public UserGroupInformation attemptingUser = null; // user name before auth
@@ -1231,7 +1237,8 @@ public abstract class Server {
           rpcMetrics.incrAuthenticationFailures();
           String clientIP = this.toString();
           // attempting user could be null
-          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
+          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser +
+            " (" + e.getLocalizedMessage() + ")");
           throw e;
         }
         if (saslServer.isComplete() && replyToken == null) {
@@ -1314,14 +1321,17 @@ public abstract class Server {
         if (!connectionHeaderRead) {
           //Every connection is expected to send the header.
           if (connectionHeaderBuf == null) {
-            connectionHeaderBuf = ByteBuffer.allocate(3);
+            connectionHeaderBuf = ByteBuffer.allocate(4);
           }
           count = channelRead(channel, connectionHeaderBuf);
           if (count < 0 || connectionHeaderBuf.remaining() > 0) {
             return count;
           }
           int version = connectionHeaderBuf.get(0);
-          byte[] method = new byte[] {connectionHeaderBuf.get(1)};
+          // TODO we should add handler for service class later
+          this.setServiceClass(connectionHeaderBuf.get(1));
+
+          byte[] method = new byte[] {connectionHeaderBuf.get(2)};
           authMethod = AuthMethod.read(new DataInputStream(
               new ByteArrayInputStream(method)));
           dataLengthBuffer.flip();
@@ -1345,7 +1355,7 @@ public abstract class Server {
           }
           
           IpcSerializationType serializationType = IpcSerializationType
-              .fromByte(connectionHeaderBuf.get(2));
+              .fromByte(connectionHeaderBuf.get(3));
           if (serializationType != IpcSerializationType.PROTOBUF) {
             respondUnsupportedSerialization(serializationType);
             return -1;
@@ -1735,6 +1745,22 @@ public abstract class Server {
       return true;
     }
     
+    /**
+     * Get service class for connection
+     * @return the serviceClass
+     */
+    public int getServiceClass() {
+      return serviceClass;
+    }
+
+    /**
+     * Set service class for connection
+     * @param serviceClass the serviceClass to set
+     */
+    public void setServiceClass(int serviceClass) {
+      this.serviceClass = serviceClass;
+    }
+
     private synchronized void close() throws IOException {
       disposeSasl();
       data = null;

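After this change the fixed-size connection header grows by one byte; a client-side sketch of the bytes now on the wire (the version, auth-method, and serialization codes below are assumed, not taken from this commit):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ConnectionHeaderSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.write(new byte[] {'h', 'r', 'p', 'c'});  // magic
        out.write(7);   // version (assumed CURRENT_VERSION value)
        out.write(0);   // service class: RPC_SERVICE_CLASS_DEFAULT
        out.write(80);  // auth method code (assumed)
        out.write(0);   // serialization type: PROTOBUF (assumed code)
        out.flush();
        System.out.println(bytes.size());  // 8: "hrpc" plus four header bytes
      }
    }
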
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java Sat Mar 30 03:50:03 2013
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Hashtable;
 import java.util.List;
 
+import javax.naming.CommunicationException;
 import javax.naming.Context;
 import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
@@ -166,6 +167,8 @@ public class LdapGroupsMapping
   private String groupMemberAttr;
   private String groupNameAttr;
 
+  public static int RECONNECT_RETRY_COUNT = 3;
+  
   /**
    * Returns list of groups for a user.
    * 
@@ -178,34 +181,63 @@ public class LdapGroupsMapping
    */
   @Override
   public synchronized List<String> getGroups(String user) throws IOException {
+    List<String> emptyResults = new ArrayList<String>();
+    /*
+     * Normal garbage collection takes care of removing Context instances when they are no longer in use. 
+     * Connections used by Context instances being garbage collected will be closed automatically.
+     * So if the connection has been closed and a CommunicationException is raised, retry a few times with a new DirContext/connection.
+     */
+    try {
+      return doGetGroups(user);
+    } catch (CommunicationException e) {
+      LOG.warn("Connection is closed, will try to reconnect");
+    } catch (NamingException e) {
+      LOG.warn("Exception trying to get groups for user " + user, e);
+      return emptyResults;
+    }
+
+    int retryCount = 0;
+    while (retryCount ++ < RECONNECT_RETRY_COUNT) {
+      //reset ctx so that new DirContext can be created with new connection
+      this.ctx = null;
+      
+      try {
+        return doGetGroups(user);
+      } catch (CommunicationException e) {
+        LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
+      } catch (NamingException e) {
+        LOG.warn("Exception trying to get groups for user " + user, e);
+        return emptyResults;
+      }
+    }
+    
+    return emptyResults;
+  }
+  
+  List<String> doGetGroups(String user) throws NamingException {
     List<String> groups = new ArrayList<String>();
 
-    try {
-      DirContext ctx = getDirContext();
+    DirContext ctx = getDirContext();
 
-      // Search for the user. We'll only ever need to look at the first result
-      NamingEnumeration<SearchResult> results = ctx.search(baseDN,
-                                                           userSearchFilter,
-                                                           new Object[]{user},
-                                                           SEARCH_CONTROLS);
-      if (results.hasMoreElements()) {
-        SearchResult result = results.nextElement();
-        String userDn = result.getNameInNamespace();
+    // Search for the user. We'll only ever need to look at the first result
+    NamingEnumeration<SearchResult> results = ctx.search(baseDN,
+        userSearchFilter,
+        new Object[]{user},
+        SEARCH_CONTROLS);
+    if (results.hasMoreElements()) {
+      SearchResult result = results.nextElement();
+      String userDn = result.getNameInNamespace();
 
-        NamingEnumeration<SearchResult> groupResults =
+      NamingEnumeration<SearchResult> groupResults =
           ctx.search(baseDN,
-                     "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
-                     new Object[]{userDn},
-                     SEARCH_CONTROLS);
-        while (groupResults.hasMoreElements()) {
-          SearchResult groupResult = groupResults.nextElement();
-          Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
-          groups.add(groupName.get().toString());
-        }
+              "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
+              new Object[]{userDn},
+              SEARCH_CONTROLS);
+      while (groupResults.hasMoreElements()) {
+        SearchResult groupResult = groupResults.nextElement();
+        Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
+        groups.add(groupName.get().toString());
       }
-    } catch (NamingException e) {
-      LOG.warn("Exception trying to get groups for user " + user, e);
-      return new ArrayList<String>();
     }
 
     return groups;
@@ -236,7 +268,7 @@ public class LdapGroupsMapping
 
     return ctx;
   }
-
+  
   /**
    * Caches groups, no need to do that for this provider
    */

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java Sat Mar 30 03:50:03 2013
@@ -164,7 +164,9 @@ public class FileBasedKeyStoresFactory i
     } else {
       keystore.load(null, null);
     }
-    KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
+    KeyManagerFactory keyMgrFactory = KeyManagerFactory
+        .getInstance(SSLFactory.SSLCERTIFICATE);
+      
     keyMgrFactory.init(keystore, (keystorePassword != null) ?
                                  keystorePassword.toCharArray() : null);
     keyManagers = keyMgrFactory.getKeyManagers();

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java Sat Mar 30 03:50:03 2013
@@ -169,8 +169,8 @@ public final class ReloadingX509TrustMan
       in.close();
     }
 
-    TrustManagerFactory trustManagerFactory =
-      TrustManagerFactory.getInstance("SunX509");
+    TrustManagerFactory trustManagerFactory = 
+      TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);
     trustManagerFactory.init(ks);
     TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
     for (TrustManager trustManager1 : trustManagers) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java Sat Mar 30 03:50:03 2013
@@ -58,6 +58,9 @@ public class SSLFactory implements Conne
     "hadoop.ssl.client.conf";
   public static final String SSL_SERVER_CONF_KEY =
     "hadoop.ssl.server.conf";
+  private static final boolean IBMJAVA = 
+      System.getProperty("java.vendor").contains("IBM");
+  public static final String SSLCERTIFICATE = IBMJAVA?"ibmX509":"SunX509"; 
 
   public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
 

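The vendor probe can be exercised on its own; a small sketch mirroring the SSLCERTIFICATE constant above (algorithm names as in the patch):

    import javax.net.ssl.KeyManagerFactory;
    import javax.net.ssl.TrustManagerFactory;

    public class SslAlgorithmSketch {
      public static void main(String[] args) throws Exception {
        String algorithm = System.getProperty("java.vendor").contains("IBM")
            ? "ibmX509" : "SunX509";
        // both JSSE factories accept the vendor-specific algorithm name
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm);
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(algorithm);
        System.out.println(kmf.getAlgorithm() + " / " + tmf.getAlgorithm());
      }
    }
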
Propchange: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1460409-1462697

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java Sat Mar 30 03:50:03 2013
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.EnumSet;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -1164,6 +1166,40 @@ public abstract class FileContextMainOpe
       Assert.assertEquals(fc.getFileStatus(file), fc.getFileLinkStatus(file));
     }
   }
+
+  /**
+   * Test that URIs with a scheme, no authority, and absolute path component
+   * resolve with the authority of the default FS.
+   */
+  @Test(timeout=30000)
+  public void testAbsolutePathSchemeNoAuthority() throws IOException,
+      URISyntaxException {
+    Path file = getTestRootPath(fc, "test/file");
+    createFile(file);
+    URI uri = file.toUri();
+    URI noAuthorityUri = new URI(uri.getScheme(), null, uri.getPath(),
+        uri.getQuery(), uri.getFragment());
+    Path noAuthority = new Path(noAuthorityUri);
+    Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
+  }
+
+  /**
+   * Test that URIs with a scheme, no authority, and relative path component
+   * resolve with the authority of the default FS.
+   */
+  @Test(timeout=30000)
+  public void testRelativePathSchemeNoAuthority() throws IOException,
+      URISyntaxException {
+    Path workDir = new Path(getAbsoluteTestRootPath(fc), new Path("test"));
+    fc.setWorkingDirectory(workDir);
+    Path file = new Path(workDir, "file");
+    createFile(file);
+    URI uri = file.toUri();
+    URI noAuthorityUri = new URI(uri.getScheme() + ":file");
+    System.out.println(noAuthorityUri);
+    Path noAuthority = new Path(noAuthorityUri);
+    Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
+  }
   
   protected void createFile(Path path) throws IOException {
     FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Sat Mar 30 03:50:03 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Locale;
 
 import junit.framework.TestCase;
 
@@ -51,7 +52,13 @@ public abstract class FileSystemContract
 
   @Override
   protected void tearDown() throws Exception {
-    fs.delete(path("/test"), true);
+    try {
+      if (fs != null) {
+        fs.delete(path("/test"), true);
+      }
+    } catch (IOException e) {
+      LOG.error("Error deleting /test: " + e, e);
+    }
   }
   
   protected int getBlockSize() {
@@ -62,10 +69,23 @@ public abstract class FileSystemContract
     return "/user/" + System.getProperty("user.name");
   }
 
+  /**
+   * Override this if the filesystem does not support rename.
+   * @return true if the FS supports rename, and rename-related tests
+   * should be run
+   */
   protected boolean renameSupported() {
     return true;
   }
 
+  /**
+   * Override this if the filesystem is not case-sensitive.
+   * @return true if the case detection/preservation tests should run
+   */
+  protected boolean filesystemIsCaseSensitive() {
+    return true;
+  }
+
   public void testFsStatus() throws Exception {
     FsStatus fsStatus = fs.getStatus();
     assertNotNull(fsStatus);
@@ -109,6 +129,7 @@ public abstract class FileSystemContract
     assertTrue(fs.mkdirs(testDir));
 
     assertTrue(fs.exists(testDir));
+    assertTrue("Should be a directory", fs.isDirectory(testDir));
     assertFalse(fs.isFile(testDir));
 
     Path parentDir = testDir.getParent();
@@ -118,17 +139,17 @@ public abstract class FileSystemContract
     Path grandparentDir = parentDir.getParent();
     assertTrue(fs.exists(grandparentDir));
     assertFalse(fs.isFile(grandparentDir));
-    
+
   }
-  
+
   public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
     Path testDir = path("/test/hadoop");
     assertFalse(fs.exists(testDir));
     assertTrue(fs.mkdirs(testDir));
     assertTrue(fs.exists(testDir));
-    
+
     createFile(path("/test/hadoop/file"));
-    
+
     Path testSubDir = path("/test/hadoop/file/subdir");
     try {
       fs.mkdirs(testSubDir);
@@ -137,7 +158,7 @@ public abstract class FileSystemContract
       // expected
     }
     assertFalse(fs.exists(testSubDir));
-    
+
     Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
     try {
       fs.mkdirs(testDeepSubDir);
@@ -146,7 +167,7 @@ public abstract class FileSystemContract
       // expected
     }
     assertFalse(fs.exists(testDeepSubDir));
-    
+
   }
 
   public void testMkdirsWithUmask() throws Exception {
@@ -177,7 +198,7 @@ public abstract class FileSystemContract
       // expected
     }
   }
-  
+
   public void testListStatusThrowsExceptionForNonExistentFile() throws Exception {
     try {
       fs.listStatus(path("/test/hadoop/file"));
@@ -186,7 +207,7 @@ public abstract class FileSystemContract
       // expected
     }
   }
-  
+
   public void testListStatus() throws Exception {
     Path[] testDirs = { path("/test/hadoop/a"),
                         path("/test/hadoop/b"),
@@ -210,7 +231,7 @@ public abstract class FileSystemContract
     paths = fs.listStatus(path("/test/hadoop/a"));
     assertEquals(0, paths.length);
   }
-  
+
   public void testWriteReadAndDeleteEmptyFile() throws Exception {
     writeReadAndDelete(0);
   }
@@ -222,7 +243,7 @@ public abstract class FileSystemContract
   public void testWriteReadAndDeleteOneBlock() throws Exception {
     writeReadAndDelete(getBlockSize());
   }
-  
+
   public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
     writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
   }
@@ -365,8 +386,7 @@ public abstract class FileSystemContract
     Path dst = path("/test/new/newdir");
     fs.mkdirs(dst);
     rename(src, dst, true, false, true);
-    assertTrue("Destination changed",
-        fs.exists(path("/test/new/newdir/file")));
+    assertIsFile(path("/test/new/newdir/file"));
   }
   
   public void testRenameDirectoryMoveToNonExistentDirectory() 
@@ -466,9 +486,9 @@ public abstract class FileSystemContract
   
   private void rename(Path src, Path dst, boolean renameSucceeded,
       boolean srcExists, boolean dstExists) throws IOException {
-    assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
-    assertEquals("Source exists", srcExists, fs.exists(src));
-    assertEquals("Destination exists", dstExists, fs.exists(dst));
+    assertEquals("mv " + src + " " + dst, renameSucceeded, fs.rename(src, dst));
+    assertEquals("Source exists: " + src, srcExists, fs.exists(src));
+    assertEquals("Destination exists: " + dst, dstExists, fs.exists(dst));
   }
 
   /**
@@ -495,6 +515,253 @@ public abstract class FileSystemContract
   }
 
   /**
+   * Assert that a filesystem is case-sensitive.
+   * This is done by creating a mixed-case filename and asserting that
+   * its lower-case version is not present.
+   * @throws Exception
+   */
+  public void testFilesystemIsCaseSensitive() throws Exception {
+    if (!filesystemIsCaseSensitive()) {
+      LOG.info("Skipping case-sensitivity test");
+      return;
+    }
+    String mixedCaseFilename = "/test/UPPER.TXT";
+    Path upper = path(mixedCaseFilename);
+    Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
+    assertFalse("File exists: " + upper, fs.exists(upper));
+    assertFalse("File exists: " + lower, fs.exists(lower));
+    FSDataOutputStream out = fs.create(upper);
+    out.writeUTF("UPPER");
+    out.close();
+    FileStatus upperStatus = fs.getFileStatus(upper);
+    assertTrue("File does not exist: " + upper, fs.exists(upper));
+    //verify the lower-case version of the filename doesn't exist
+    assertFalse("File exists: " + lower, fs.exists(lower));
+    //now overwrite the lower case version of the filename with a
+    //new version.
+    out = fs.create(lower);
+    out.writeUTF("l");
+    out.close();
+    assertTrue("File does not exist: " + lower, fs.exists(lower));
+    //verify the length of the upper file hasn't changed
+    FileStatus newStatus = fs.getFileStatus(upper);
+    assertEquals("Expected status: " + upperStatus
+                 + " actual status: " + newStatus,
+                 upperStatus.getLen(),
+                 newStatus.getLen());
+  }
+
+  /**
+   * Asserts that a zero-byte file has a status of file and not
+   * directory or symlink
+   * @throws Exception on failures
+   */
+  public void testZeroByteFilesAreFiles() throws Exception {
+    Path src = path("/test/testZeroByteFilesAreFiles");
+    //create a zero byte file
+    FSDataOutputStream out = fs.create(src);
+    out.close();
+    assertIsFile(src);
+  }
+
+  /**
+   * Asserts that a multi-byte file has a status of file and not
+   * directory or symlink
+   * @throws Exception on failures
+   */
+  public void testMultiByteFilesAreFiles() throws Exception {
+    Path src = path("/test/testMultiByteFilesAreFiles");
+    FSDataOutputStream out = fs.create(src);
+    out.writeUTF("testMultiByteFilesAreFiles");
+    out.close();
+    assertIsFile(src);
+  }
+
+  /**
+   * Assert that the root directory always exists
+   * @throws Exception on failures
+   */
+  public void testRootDirAlwaysExists() throws Exception {
+    //this will throw an exception if the path is not found
+    fs.getFileStatus(path("/"));
+    //this catches overrides of the base exists() method that don't
+    //use getFileStatus() as an existence probe
+    assertTrue("FileSystem.exists() fails for root", fs.exists(path("/")));
+  }
+
+  /**
+   * Assert that root directory renames are not allowed
+   * @throws Exception on failures
+   */
+  public void testRenameRootDirForbidden() throws Exception {
+    if (!renameSupported()) return;
+
+    rename(path("/"),
+           path("/test/newRootDir"),
+           false, true, false);
+  }
+
+  /**
+   * Assert that renaming a parent directory to be a child
+   * of itself is forbidden
+   * @throws Exception on failures
+   */
+  public void testRenameChildDirForbidden() throws Exception {
+    if (!renameSupported()) return;
+    LOG.info("testRenameChildDirForbidden");
+    Path parentdir = path("/test/parentdir");
+    fs.mkdirs(parentdir);
+    Path childFile = new Path(parentdir, "childfile");
+    createFile(childFile);
+    //verify one level down
+    Path childdir = new Path(parentdir, "childdir");
+    rename(parentdir, childdir, false, true, false);
+    //now another level
+    fs.mkdirs(childdir);
+    Path childchilddir = new Path(childdir, "childdir");
+    rename(parentdir, childchilddir, false, true, false);
+  }
+
+  /**
+   * This is a sanity check to make sure that any filesystem's handling of
+   * renames doesn't cause any regressions
+   */
+  public void testRenameToDirWithSamePrefixAllowed() throws Throwable {
+    if (!renameSupported()) return;
+    Path parentdir = path("test/parentdir");
+    fs.mkdirs(parentdir);
+    Path dest = path("test/parentdirdest");
+    rename(parentdir, dest, true, false, true);
+  }
+
+  /**
+   * Trying to rename a directory onto itself should fail,
+   * preserving everything underneath.
+   */
+  public void testRenameDirToSelf() throws Throwable {
+    if (!renameSupported()) {
+      return;
+    }
+    Path parentdir = path("test/parentdir");
+    fs.mkdirs(parentdir);
+    Path child = new Path(parentdir, "child");
+    createFile(child);
+
+    rename(parentdir, parentdir, false, true, true);
+    //verify the child is still there
+    assertIsFile(child);
+  }
+
+  /**
+   * Trying to rename a directory onto its parent dir will build
+   * a destination path of its original name, which should then fail.
+   * The source path and the destination path should still exist afterwards
+   */
+  public void testMoveDirUnderParent() throws Throwable {
+    if (!renameSupported()) {
+      return;
+    }
+    Path testdir = path("test/dir");
+    fs.mkdirs(testdir);
+    Path parent = testdir.getParent();
+    //the outcome here is ambiguous, so it is not checked
+    fs.rename(testdir, parent);
+    assertEquals("Source exists: " + testdir, true, fs.exists(testdir));
+    assertEquals("Destination exists: " + parent, true, fs.exists(parent));
+  }
+
+  /**
+   * Trying to rename a file onto itself should succeed (it's a no-op)
+   *
+   */
+  public void testRenameFileToSelf() throws Throwable {
+    if (!renameSupported()) return;
+    Path filepath = path("test/file");
+    createFile(filepath);
+    //HDFS expects rename src, src -> true
+    rename(filepath, filepath, true, true, true);
+    //verify the file is still there
+    assertIsFile(filepath);
+  }
+
+  /**
+   * Trying to move a file into its parent dir should succeed
+   * (again a no-op)
+   */
+  public void testMoveFileUnderParent() throws Throwable {
+    if (!renameSupported()) return;
+    Path filepath = path("test/file");
+    createFile(filepath);
+    //HDFS expects rename src, src -> true
+    rename(filepath, filepath, true, true, true);
+    //verify the file is still there
+    assertIsFile(filepath);
+  }
+
+  public void testLSRootDir() throws Throwable {
+    Path dir = path("/");
+    Path child = path("/test");
+    createFile(child);
+    assertListFilesFinds(dir, child);
+  }
+
+  public void testListStatusRootDir() throws Throwable {
+    Path dir = path("/");
+    Path child = path("/test");
+    createFile(child);
+    assertListStatusFinds(dir, child);
+  }
+
+  private void assertListFilesFinds(Path dir, Path subdir) throws IOException {
+    RemoteIterator<LocatedFileStatus> iterator =
+      fs.listFiles(dir, true);
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    while (iterator.hasNext()) {
+      LocatedFileStatus next =  iterator.next();
+      builder.append(next.toString()).append('\n');
+      if (next.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+               + " not found in directory " + dir + ":" + builder,
+               found);
+  }
+
+  private void assertListStatusFinds(Path dir, Path subdir) throws IOException {
+    FileStatus[] stats = fs.listStatus(dir);
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    for (FileStatus stat : stats) {
+      builder.append(stat.toString()).append('\n');
+      if (stat.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+               + " not found in directory " + dir + ":" + builder,
+               found);
+  }
+
+
+  /**
+   * Assert that a file exists and that its {@link FileStatus} entry
+   * declares it to be a file and not a symlink or directory.
+   * @param filename name of the file
+   * @throws IOException IO problems during file operations
+   */
+  private void assertIsFile(Path filename) throws IOException {
+    assertTrue("Does not exist: " + filename, fs.exists(filename));
+    FileStatus status = fs.getFileStatus(filename);
+    String fileInfo = filename + "  " + status;
+    assertTrue("Not a file " + fileInfo, status.isFile());
+    assertFalse("File claims to be a symlink " + fileInfo,
+                status.isSymlink());
+    assertFalse("File claims to be a directory " + fileInfo,
+                status.isDirectory());
+  }
+
+  /**
    *
    * Write a file and read it in, validating the result. Optional flags control
    * whether file overwrite operations should be enabled, and whether the

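Note: the new filesystemIsCaseSensitive() hook follows the existing
renameSupported() pattern. A hypothetical subclass for a case-insensitive
store might opt out as below (class name is illustrative; the usual setUp()
wiring of fs is elided):

    public class CaseInsensitiveFSContractTest extends FileSystemContractBaseTest {
      @Override
      protected boolean filesystemIsCaseSensitive() {
        // testFilesystemIsCaseSensitive() logs and returns early when false
        return false;
      }
    }
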
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java Sat Mar 30 03:50:03 2013
@@ -26,6 +26,7 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.apache.hadoop.util.Progressable;
 import org.junit.BeforeClass;
@@ -313,6 +314,11 @@ public class TestFileSystemCanonicalizat
     }
     
     @Override
+    protected URI canonicalizeUri(URI uri) {
+      return NetUtils.getCanonicalUri(uri, getDefaultPort());
+    }
+
+    @Override
     public FSDataInputStream open(Path f, int bufferSize) throws IOException {
       throw new IOException("not supposed to be here");
     }

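Note: the canonicalizeUri() override above delegates to
NetUtils.getCanonicalUri, which fills in the filesystem's default port and
canonicalizes the host through the registered resolver. A hedged sketch,
assuming a default port of 8020 purely for illustration:

    import java.net.URI;

    import org.apache.hadoop.net.NetUtils;

    public class CanonicalUriSketch {
      public static void main(String[] args) {
        URI raw = URI.create("myfs://namenode.example.com/data");
        // 8020 stands in for getDefaultPort(); the expected shape of the
        // result is myfs://namenode.example.com:8020/data (the host may
        // also be rewritten to a fully qualified name by the resolver).
        URI canonical = NetUtils.getCanonicalUri(raw, 8020);
        System.out.println(canonical);
      }
    }
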
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java Sat Mar 30 03:50:03 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Shell;
 
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
@@ -266,9 +267,14 @@ public class TestLocalFileSystem {
     LocalFileSystem fs = FileSystem.getLocal(conf);
     Path path = new Path(TEST_ROOT_DIR, "test-file");
     writeFile(fs, path, 1);
-    BufferedFSInputStream bis = new BufferedFSInputStream(
-        new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024);
-    assertNotNull(bis.getFileDescriptor());
+    BufferedFSInputStream bis = null;
+    try {
+      bis = new BufferedFSInputStream(new RawLocalFileSystem()
+        .new LocalFSFileInputStream(path), 1024);
+      assertNotNull(bis.getFileDescriptor());
+    } finally {
+      IOUtils.cleanup(null, bis);
+    }
   }
 
   @Test

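Note: the TestLocalFileSystem fix switches to the close-in-finally idiom via
org.apache.hadoop.io.IOUtils.cleanup. A minimal re-implementation sketch of
that idiom (not the Hadoop source), showing why both a null log and a null
stream are safe in the finally block:

    import java.io.Closeable;
    import java.io.IOException;

    import org.apache.commons.logging.Log;

    public final class QuietCleanup {
      // Close every stream, swallowing IOExceptions so cleanup in a
      // finally block can never mask the original test failure.
      public static void cleanup(Log log, Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c != null) {
            try {
              c.close();
            } catch (IOException e) {
              if (log != null) {
                log.debug("Exception in closing " + c, e);
              }
            }
          }
        }
      }
    }
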
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3/InMemoryFileSystemStore.java Sat Mar 30 03:50:03 2013
@@ -50,6 +50,7 @@ class InMemoryFileSystemStore implements
   @Override
   public void initialize(URI uri, Configuration conf) {
     this.conf = conf;
+    inodes.put(new Path("/"), INode.DIRECTORY_INODE);
   }
   
   @Override

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java Sat Mar 30 03:50:03 2013
@@ -51,7 +51,7 @@ public abstract class NativeS3FileSystem
   
   public void testListStatusForRoot() throws Exception {
     FileStatus[] paths = fs.listStatus(path("/"));
-    assertEquals(0, paths.length);
+    assertEquals("Root directory is not empty; ", 0, paths.length);
     
     Path testDir = path("/test");
     assertTrue(fs.mkdirs(testDir));

Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java Sat Mar 30 03:50:03 2013
@@ -25,6 +25,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
@@ -521,10 +522,52 @@ public class TestIPC {
   }
   
   /**
+   * Check that the service class byte in the IPC header is correct on the wire.
+   */
+  @Test(timeout=60000)
+  public void testIpcWithServiceClass() throws Exception {
+    // start server
+    Server server = new TestServer(5, false);
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    server.start();
+
+    // start client
+    Client.setConnectTimeout(conf, 10000);
+
+    callAndVerify(server, addr, 0, true);
+    // The service class travels as a single signed byte, so -128 is the
+    // lowest value that survives the cast to byte; -129 is truncated.
+    callAndVerify(server, addr, -128, true);
+    callAndVerify(server, addr, -129, false);
+
+    // The upper bound is 127, which survives the cast to byte;
+    // 128 is truncated.
+    callAndVerify(server, addr, 127, true);
+    callAndVerify(server, addr, 128, false);
+
+    server.stop();
+  }
+
+  /**
+   * Make a call from a client and verify whether the service class seen
+   * by the server matches the one the client sent
+   */
+  private void callAndVerify(Server server, InetSocketAddress addr,
+      int serviceClass, boolean expectUnchanged) throws Exception {
+    Client client = new Client(LongWritable.class, conf);
+
+    client.call(new LongWritable(RANDOM.nextLong()),
+        addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
+    Connection connection = server.getConnections().get(0);
+    int serviceClass2 = connection.getServiceClass();
+    assertEquals(expectUnchanged, serviceClass == serviceClass2);
+    client.stop();
+  }
+
+  /**
    * Check that file descriptors aren't leaked by starting
    * and stopping IPC servers.
    */
-  @Test
+  @Test(timeout=60000)
   public void testSocketLeak() throws Exception {
     Assume.assumeTrue(FD_DIR.exists()); // only run on Linux
 

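Note: the service class range checks above come down to Java's narrowing
conversion to byte. A standalone sketch of the arithmetic, with the same
sample values the test uses:

    public class ServiceClassByteDemo {
      public static void main(String[] args) {
        int[] samples = {0, -128, -129, 127, 128};
        for (int v : samples) {
          byte onWire = (byte) v;  // what the one-byte header field carries
          System.out.println(v + " -> " + onWire
              + (v == onWire ? " (preserved)" : " (truncated)"));
        }
      }
    }
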
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java Sat Mar 30 03:50:03 2013
@@ -26,6 +26,7 @@ import java.io.Writer;
 import java.util.Arrays;
 import java.util.List;
 
+import javax.naming.CommunicationException;
 import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
 import javax.naming.directory.Attribute;
@@ -46,21 +47,15 @@ public class TestLdapGroupsMapping {
   private DirContext mockContext;
   
   private LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
+  private NamingEnumeration mockUserNamingEnum = mock(NamingEnumeration.class);
+  private NamingEnumeration mockGroupNamingEnum = mock(NamingEnumeration.class);
+  private String[] testGroups = new String[] {"group1", "group2"};
   
   @Before
   public void setupMocks() throws NamingException {
     mockContext = mock(DirContext.class);
     doReturn(mockContext).when(mappingSpy).getDirContext();
-    
-    NamingEnumeration mockUserNamingEnum = mock(NamingEnumeration.class);
-    NamingEnumeration mockGroupNamingEnum = mock(NamingEnumeration.class);
-    
-    // The search functionality of the mock context is reused, so we will
-    // return the user NamingEnumeration first, and then the group
-    when(mockContext.search(anyString(), anyString(), any(Object[].class),
-        any(SearchControls.class)))
-        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
-    
+
     SearchResult mockUserResult = mock(SearchResult.class);
     // We only ever call hasMoreElements once for the user NamingEnum, so 
     // we can just have one return value
@@ -76,23 +71,57 @@ public class TestLdapGroupsMapping {
     
     // Define the attribute for the name of the first group
     Attribute group1Attr = new BasicAttribute("cn");
-    group1Attr.add("group1");
+    group1Attr.add(testGroups[0]);
     Attributes group1Attrs = new BasicAttributes();
     group1Attrs.put(group1Attr);
     
     // Define the attribute for the name of the second group
     Attribute group2Attr = new BasicAttribute("cn");
-    group2Attr.add("group2");
+    group2Attr.add(testGroups[1]);
     Attributes group2Attrs = new BasicAttributes();
     group2Attrs.put(group2Attr);
     
     // This search result gets reused, so return group1, then group2
     when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
-    
   }
   
   @Test
   public void testGetGroups() throws IOException, NamingException {
+    // The search functionality of the mock context is reused, so we will
+    // return the user NamingEnumeration first, and then the group
+    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+        any(SearchControls.class)))
+        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+    
+    doTestGetGroups(Arrays.asList(testGroups), 2);
+  }
+
+  @Test
+  public void testGetGroupsWithConnectionClosed() throws IOException, NamingException {
+    // This mocks a connection that has been closed/GC-ed: the first search
+    // call throws CommunicationException; after reconnecting, the user
+    // NamingEnumeration is returned first, and then the group one
+    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+        any(SearchControls.class)))
+        .thenThrow(new CommunicationException("Connection is closed"))
+        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+    
+    // Even though the first connection fails, the groups should still be
+    // retrieved after reconnecting
+    doTestGetGroups(Arrays.asList(testGroups), 1 + 2); // 1 failed call + 2 successful searches
+  }
+
+  @Test
+  public void testGetGroupsWithLdapDown() throws IOException, NamingException {
+    // This mocks the case where the LDAP server is down and every search throws CommunicationException
+    when(mockContext.search(anyString(), anyString(), any(Object[].class),
+        any(SearchControls.class)))
+        .thenThrow(new CommunicationException("Connection is closed"));
+    
+    // The LDAP server is down, so no groups should be retrieved
+    doTestGetGroups(Arrays.asList(new String[] {}),
+        1 + LdapGroupsMapping.RECONNECT_RETRY_COUNT); // 1 initial call + the retries
+  }
+  
+  private void doTestGetGroups(List<String> expectedGroups, int searchTimes) throws IOException, NamingException {  
     Configuration conf = new Configuration();
     // Set this, so we don't throw an exception
     conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
@@ -102,10 +131,10 @@ public class TestLdapGroupsMapping {
     // regardless of input
     List<String> groups = mappingSpy.getGroups("some_user");
     
-    Assert.assertEquals(Arrays.asList("group1", "group2"), groups);
+    Assert.assertEquals(expectedGroups, groups);
     
     // We should have searched for a user, and then two groups
-    verify(mockContext, times(2)).search(anyString(),
+    verify(mockContext, times(searchTimes)).search(anyString(),
                                          anyString(),
                                          any(Object[].class),
                                          any(SearchControls.class));

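Note: the three LDAP tests above pin down a retry contract: one initial
search plus up to LdapGroupsMapping.RECONNECT_RETRY_COUNT reconnect
attempts, degrading to an empty group list if the server stays down. A
hedged sketch of that contract (not the LdapGroupsMapping source; the retry
count of 3 is assumed for illustration):

    import java.util.Collections;
    import java.util.List;

    import javax.naming.CommunicationException;
    import javax.naming.NamingException;

    public abstract class RetryingLookup {
      static final int RECONNECT_RETRY_COUNT = 3; // assumed value

      abstract List<String> doSearch(String user) throws NamingException;

      List<String> getGroups(String user) {
        // one initial attempt + RECONNECT_RETRY_COUNT retries
        for (int attempt = 0; attempt <= RECONNECT_RETRY_COUNT; attempt++) {
          try {
            return doSearch(user);
          } catch (CommunicationException e) {
            // connection closed or server down: reconnect and try again
          } catch (NamingException e) {
            break; // non-connection failure: give up
          }
        }
        return Collections.emptyList();
      }
    }
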
Modified: hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java?rev=1462698&r1=1462697&r2=1462698&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java Sat Mar 30 03:50:03 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.test;
 import java.io.File;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.lang.reflect.InvocationTargetException;
 import java.util.Arrays;
 import java.util.Random;
 import java.util.Set;
@@ -266,15 +267,29 @@ public abstract class GenericTestUtils {
    */
   public static class DelegateAnswer implements Answer<Object> { 
     private final Object delegate;
+    private final Log log;
     
     public DelegateAnswer(Object delegate) {
+      this(null, delegate);
+    }
+    
+    public DelegateAnswer(Log log, Object delegate) {
+      this.log = log;
       this.delegate = delegate;
     }
 
     @Override
     public Object answer(InvocationOnMock invocation) throws Throwable {
-      return invocation.getMethod().invoke(
-          delegate, invocation.getArguments());
+      try {
+        if (log != null) {
+          log.info("Call to " + invocation + " on " + delegate,
+              new Exception("TRACE"));
+        }
+        return invocation.getMethod().invoke(
+            delegate, invocation.getArguments());
+      } catch (InvocationTargetException ite) {
+        throw ite.getCause();
+      }
     }
   }
 

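Note: DelegateAnswer now optionally logs each call with a synthetic stack
trace before forwarding it, and unwraps InvocationTargetException so the
mock throws the delegate's real exception. A hypothetical usage sketch
(names are illustrative):

    import static org.mockito.Mockito.mock;

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.test.GenericTestUtils.DelegateAnswer;

    public class DelegateAnswerSketch {
      private static final Log LOG = LogFactory.getLog(DelegateAnswerSketch.class);

      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        List<String> real = new ArrayList<String>();
        // Every call on the mock is logged and then delegated to the real list
        List<String> traced = mock(List.class, new DelegateAnswer(LOG, real));
        traced.add("hello");
        System.out.println(real.size()); // 1 - the call reached the delegate
      }
    }
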

