hadoop-common-commits mailing list archives

From a..@apache.org
Subject svn commit: r1196458 [2/3] - in /hadoop/common/branches/HDFS-1623: common/ hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authent...
Date Wed, 02 Nov 2011 05:34:59 GMT
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java Wed Nov  2 05:34:31 2011
@@ -123,7 +123,7 @@ public class HttpServer implements Filte
 
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, Connector connector) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, connector);
+    this(name, bindAddress, port, findPort, conf, null, connector, null);
   }
 
   /**
@@ -142,11 +142,7 @@ public class HttpServer implements Filte
    */
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null);
-    for (String path : pathSpecs) {
-        LOG.info("adding path spec: " + path);
-      addFilterPathMapping(path, webAppContext);
-    }
+    this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
   }
   
   /**
@@ -160,19 +156,20 @@ public class HttpServer implements Filte
    */
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null);
+    this(name, bindAddress, port, findPort, conf, null, null, null);
   }
 
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl) 
       throws IOException {
-    this(name, bindAddress, port, findPort, conf, adminsAcl, null);
+    this(name, bindAddress, port, findPort, conf, adminsAcl, null, null);
   }
-  
+
   /**
    * Create a status server on the given port.
    * The jsp scripts are taken from src/webapps/<name>.
    * @param name The name of the server
+   * @param bindAddress The address for this server
    * @param port The port to use on the server
    * @param findPort whether the server should start at the given port and 
    *        increment by 1 until it finds a free port.
@@ -182,6 +179,26 @@ public class HttpServer implements Filte
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl, 
       Connector connector) throws IOException {
+    this(name, bindAddress, port, findPort, conf, adminsAcl, connector, null);
+  }
+
+  /**
+   * Create a status server on the given port.
+   * The jsp scripts are taken from src/webapps/<name>.
+   * @param name The name of the server
+   * @param bindAddress The address for this server
+   * @param port The port to use on the server
+   * @param findPort whether the server should start at the given port and 
+   *        increment by 1 until it finds a free port.
+   * @param conf Configuration 
+   * @param adminsAcl {@link AccessControlList} of the admins
+   * @param connector A jetty connection listener
+   * @param pathSpecs Path specifications that this httpserver will be serving. 
+   *        These will be added to any filters.
+   */
+  public HttpServer(String name, String bindAddress, int port,
+      boolean findPort, Configuration conf, AccessControlList adminsAcl, 
+      Connector connector, String[] pathSpecs) throws IOException {
     webServer = new Server();
     this.findPort = findPort;
     this.adminsAcl = adminsAcl;
@@ -230,7 +247,15 @@ public class HttpServer implements Filte
         c.initFilter(this, conf);
       }
     }
+
     addDefaultServlets();
+
+    if (pathSpecs != null) {
+      for (String path : pathSpecs) {
+        LOG.info("adding path spec: " + path);
+        addFilterPathMapping(path, webAppContext);
+      }
+    }
   }
 
   /**

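The new eight-argument constructor threads the path specs through the common initialization path instead of registering them after construction. A minimal sketch of calling it; the server name, bind address, port and path specs below are illustrative, not taken from the commit:

    Configuration conf = new Configuration();
    // Any paths listed here are also covered by the server's filters.
    HttpServer server = new HttpServer("example", "0.0.0.0", 50070,
        true /* findPort */, conf,
        null /* adminsAcl */, null /* connector */,
        new String[] { "/data/*", "/checksum/*" });
    server.start();
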
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java Wed Nov  2 05:34:31 2011
@@ -151,16 +151,7 @@ public class WritableComparator implemen
   /** Lexicographic order of binary data. */
   public static int compareBytes(byte[] b1, int s1, int l1,
                                  byte[] b2, int s2, int l2) {
-    int end1 = s1 + l1;
-    int end2 = s2 + l2;
-    for (int i = s1, j = s2; i < end1 && j < end2; i++, j++) {
-      int a = (b1[i] & 0xff);
-      int b = (b2[j] & 0xff);
-      if (a != b) {
-        return a - b;
-      }
-    }
-    return l1 - l2;
+    return FastByteComparisons.compareTo(b1, s1, l1, b2, s2, l2);
   }
 
   /** Compute hash for binary data. */

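The hand-rolled loop is replaced by FastByteComparisons.compareTo, which keeps the same contract: bytes compare as unsigned values, lexicographically, with length as the tie-breaker. A small sketch of that contract (the arrays are illustrative):

    byte[] a = { 0x01, (byte) 0xff };
    byte[] b = { 0x01, 0x7f };
    byte[] c = { 0x01 };
    // 0xff compares as 255, not -1, so a sorts after b:
    assert WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length) > 0;
    // With an equal prefix, the shorter array sorts first:
    assert WritableComparator.compareBytes(c, 0, c.length, a, 0, a.length) < 0;
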
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java Wed Nov  2 05:34:31 2011
@@ -46,10 +46,41 @@ public class NativeIO {
   public static final int O_FSYNC = O_SYNC;
   public static final int O_NDELAY = O_NONBLOCK;
 
+  // Flags for posix_fadvise() from bits/fcntl.h
+  /* No further special treatment.  */
+  public static final int POSIX_FADV_NORMAL = 0; 
+  /* Expect random page references.  */
+  public static final int POSIX_FADV_RANDOM = 1; 
+  /* Expect sequential page references.  */
+  public static final int POSIX_FADV_SEQUENTIAL = 2; 
+  /* Will need these pages.  */
+  public static final int POSIX_FADV_WILLNEED = 3; 
+  /* Don't need these pages.  */
+  public static final int POSIX_FADV_DONTNEED = 4; 
+  /* Data will be accessed once.  */
+  public static final int POSIX_FADV_NOREUSE = 5; 
+
+
+  /* Wait upon writeout of all pages
+     in the range before performing the
+     write.  */
+  public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
+  /* Initiate writeout of all those
+     dirty pages in the range which are
+     not presently under writeback.  */
+  public static final int SYNC_FILE_RANGE_WRITE = 2;
+
+  /* Wait upon writeout of all pages in
+     the range after performing the
+     write.  */
+  public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;
+
   private static final Log LOG = LogFactory.getLog(NativeIO.class);
 
   private static boolean nativeLoaded = false;
   private static boolean workaroundNonThreadSafePasswdCalls = false;
+  private static boolean fadvisePossible = true;
+  private static boolean syncFileRangePossible = true;
 
   static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
     "hadoop.workaround.non.threadsafe.getpwuid";
@@ -88,9 +119,58 @@ public class NativeIO {
   /** Wrapper around chmod(2) */
   public static native void chmod(String path, int mode) throws IOException;
 
+  /** Wrapper around posix_fadvise(2) */
+  static native void posix_fadvise(
+    FileDescriptor fd, long offset, long len, int flags) throws NativeIOException;
+
+  /** Wrapper around sync_file_range(2) */
+  static native void sync_file_range(
+    FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException;
+
   /** Initialize the JNI method ID and class ID cache */
   private static native void initNative();
 
+  /**
+   * Call posix_fadvise on the given file descriptor. See the manpage
+   * for this syscall for more information. On systems where this
+   * call is not available, does nothing.
+   *
+   * @throws NativeIOException if there is an error with the syscall
+   */
+  public static void posixFadviseIfPossible(
+      FileDescriptor fd, long offset, long len, int flags)
+      throws NativeIOException {
+    if (nativeLoaded && fadvisePossible) {
+      try {
+        posix_fadvise(fd, offset, len, flags);
+      } catch (UnsupportedOperationException uoe) {
+        fadvisePossible = false;
+      } catch (UnsatisfiedLinkError ule) {
+        fadvisePossible = false;
+      }
+    }
+  }
+
+  /**
+   * Call sync_file_range on the given file descriptor. See the manpage
+   * for this syscall for more information. On systems where this
+   * call is not available, does nothing.
+   *
+   * @throws NativeIOException if there is an error with the syscall
+   */
+  public static void syncFileRangeIfPossible(
+      FileDescriptor fd, long offset, long nbytes, int flags)
+      throws NativeIOException {
+    if (nativeLoaded && syncFileRangePossible) {
+      try {
+        sync_file_range(fd, offset, nbytes, flags);
+      } catch (UnsupportedOperationException uoe) {
+        syncFileRangePossible = false;
+      } catch (UnsatisfiedLinkError ule) {
+        syncFileRangePossible = false;
+      }
+    }
+  }
 
   /**
    * Result type of the fstat call

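A hedged sketch of how a writer might use the new wrappers after streaming out data it will not read back soon; the file path, offset and length are illustrative, and on platforms without these syscalls both calls silently do nothing:

    FileOutputStream out = new FileOutputStream("/tmp/example-block");  // hypothetical file
    byte[] chunk = new byte[1 << 20];
    out.write(chunk);
    long offset = 0;
    long len = chunk.length;
    // Kick off asynchronous writeback of the dirty pages in this range:
    NativeIO.syncFileRangeIfPossible(out.getFD(), offset, len,
        NativeIO.SYNC_FILE_RANGE_WRITE);
    // Tell the kernel these pages will not be reused, so it may evict them:
    NativeIO.posixFadviseIfPossible(out.getFD(), offset, len,
        NativeIO.POSIX_FADV_DONTNEED);
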
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java Wed Nov  2 05:34:31 2011
@@ -489,7 +489,8 @@ public class RPC {
       }
     } else {
       LOG.error("Could not get invocation handler " + invocationHandler +
-          " for proxy " + proxy + ", or invocation handler is not closeable.");
+          " for proxy class " + (proxy == null ? null : proxy.getClass()) +
+          ", or invocation handler is not closeable.");
     }
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java Wed Nov  2 05:34:31 2011
@@ -25,10 +25,9 @@ public class RpcServerException extends 
 
   /**
    * Constructs exception with the specified detail message.
-   * 
-   * @param messages detailed message.
+   * @param message detailed message.
    */
-  RpcServerException(final String message) {
+  public RpcServerException(final String message) {
     super(message);
   }
   
@@ -36,12 +35,11 @@ public class RpcServerException extends 
    * Constructs exception with the specified detail message and cause.
    * 
    * @param message message.
-   * @param cause that cause this exception
   * @param cause the cause (can be retrieved by the {@link #getCause()} method).
    *          (A <tt>null</tt> value is permitted, and indicates that the cause
    *          is nonexistent or unknown.)
    */
-  RpcServerException(final String message, final Throwable cause) {
+  public RpcServerException(final String message, final Throwable cause) {
     super(message, cause);
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java Wed Nov  2 05:34:31 2011
@@ -102,6 +102,23 @@ public abstract class Server {
    */
   public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
   
+  /**
+   * If the user accidentally sends an HTTP GET to an IPC port, we detect this
+   * and send back a nicer response.
+   */
+  private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
+      "GET ".getBytes());
+  
+  /**
+   * An HTTP response to send back if we detect an HTTP request to our IPC
+   * port.
+   */
+  static final String RECEIVED_HTTP_REQ_RESPONSE =
+    "HTTP/1.1 404 Not Found\r\n" +
+    "Content-type: text/plain\r\n\r\n" +
+    "It looks like you are making an HTTP request to a Hadoop IPC port. " +
+    "This is not the correct port for the web interface on this daemon.\r\n";
+  
   // 1 : Introduce ping and server does not throw away RPCs
   // 3 : Introduce the protocol into the RPC connection header
   // 4 : Introduced SASL security layer
@@ -910,6 +927,7 @@ public abstract class Server {
     private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream();
     // Fake 'call' for SASL context setup
     private static final int SASL_CALLID = -33;
+    
     private final Call saslCall = new Call(SASL_CALLID, null, this);
     private final ByteArrayOutputStream saslResponse = new ByteArrayOutputStream();
     
@@ -1142,7 +1160,7 @@ public abstract class Server {
           if (count < 0 || dataLengthBuffer.remaining() > 0) 
             return count;
         }
-      
+        
         if (!rpcHeaderRead) {
           //Every connection is expected to send the header.
           if (rpcHeaderBuffer == null) {
@@ -1156,7 +1174,16 @@ public abstract class Server {
           byte[] method = new byte[] {rpcHeaderBuffer.get(1)};
           authMethod = AuthMethod.read(new DataInputStream(
               new ByteArrayInputStream(method)));
-          dataLengthBuffer.flip();          
+          dataLengthBuffer.flip();
+          
+          // Check if it looks like the user is hitting an IPC port
+          // with an HTTP GET - this is a common error, so we can
+          // send back a simple string indicating as much.
+          if (HTTP_GET_BYTES.equals(dataLengthBuffer)) {
+            setupHttpRequestOnIpcPortResponse();
+            return -1;
+          }
+        
           if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) {
             //Warning is ok since this is not supposed to happen.
             LOG.warn("Incorrect header or version mismatch from " + 
@@ -1171,8 +1198,12 @@ public abstract class Server {
             throw new IOException("Unable to read authentication method");
           }
           if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
-            AccessControlException ae = new AccessControlException(
-                "Authentication is required");
+            AccessControlException ae = new AccessControlException("Authorization ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+              + ") is enabled but authentication ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+              + ") is configured as simple. Please configure another method "
+              + "like kerberos or digest.");
             setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
                 null, ae.getClass().getName(), ae.getMessage());
             responder.doRespond(authFailedCall);
@@ -1271,6 +1302,13 @@ public abstract class Server {
         responder.doRespond(fakeCall);
       }
     }
+    
+    private void setupHttpRequestOnIpcPortResponse() throws IOException {
+      Call fakeCall =  new Call(0, null, this);
+      fakeCall.setResponse(ByteBuffer.wrap(
+          RECEIVED_HTTP_REQ_RESPONSE.getBytes()));
+      responder.doRespond(fakeCall);
+    }
 
     /// Reads the connection header following version
     private void processHeader(byte[] buf) throws IOException {
@@ -1773,6 +1811,16 @@ public abstract class Server {
   }
   
   /**
+   * Get the port on which the IPC Server is listening for incoming connections.
+   * This could be an ephemeral port too, in which case we return the actual
+   * port to which the Server is bound.
+   * @return port on which IPC Server is listening
+   */
+  public int getPort() {
+    return port;
+  }
+  
+  /**
    * The number of open RPC conections
    * @return the number of open rpc connections
    */

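From the client side, the effect is that a browser or curl pointed at an IPC port now gets the plain-text 404 above instead of a connection that hangs or resets. A hedged sketch using java.net.Socket; the host and port are illustrative:

    Socket s = new Socket("namenode.example.com", 8020);   // hypothetical IPC endpoint
    s.getOutputStream().write("GET / HTTP/1.1\r\n\r\n".getBytes());
    BufferedReader in = new BufferedReader(new InputStreamReader(s.getInputStream()));
    System.out.println(in.readLine());   // HTTP/1.1 404 Not Found
    s.close();
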
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java Wed Nov  2 05:34:31 2011
@@ -168,6 +168,7 @@ public class JMXJsonServlet extends Http
           if (splitStrings.length != 2) {
             jg.writeStringField("result", "ERROR");
             jg.writeStringField("message", "query format is not as expected.");
+            jg.flush();
             response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
             return;
           }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java Wed Nov  2 05:34:31 2011
@@ -37,14 +37,18 @@ import org.apache.hadoop.classification.
 public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
   private Map<String, String> cache = new ConcurrentHashMap<String, String>();
   protected DNSToSwitchMapping rawMapping;
-  
+
+  /**
+   * cache a raw DNS mapping
+   * @param rawMapping the raw mapping to cache
+   */
   public CachedDNSToSwitchMapping(DNSToSwitchMapping rawMapping) {
     this.rawMapping = rawMapping;
   }
-  
 
   /**
-   * Returns the hosts from 'names' that have not been cached previously
+   * @param names a list of hostnames to probe for being cached
+   * @return the hosts from 'names' that have not been cached previously
    */
   private List<String> getUncachedHosts(List<String> names) {
     // find out all names without cached resolved location
@@ -58,7 +62,12 @@ public class CachedDNSToSwitchMapping im
   }
 
   /**
-   * Caches the resolved hosts
+   * Caches the resolved host:rack mappings. The two list
+   * parameters must be of equal size.
+   *
+   * @param uncachedHosts a list of hosts that were uncached
+   * @param resolvedHosts a list of resolved host entries where the element
+   * at index(i) is the resolved value for the entry in uncachedHosts[i]
    */
   private void cacheResolvedHosts(List<String> uncachedHosts, 
       List<String> resolvedHosts) {
@@ -71,8 +80,9 @@ public class CachedDNSToSwitchMapping im
   }
 
   /**
-   * Returns the cached resolution of the list of hostnames/addresses.
-   * Returns null if any of the names are not currently in the cache
+   * @param names a list of hostnames to look up (can be empty)
+   * @return the cached resolution of the list of hostnames/addresses,
+   *  or null if any of the names are not currently in the cache
    */
   private List<String> getCachedHosts(List<String> names) {
     List<String> result = new ArrayList<String>(names.size());
@@ -88,6 +98,7 @@ public class CachedDNSToSwitchMapping im
     return result;
   }
 
+  @Override
   public List<String> resolve(List<String> names) {
     // normalize all input names to be in the form of IP addresses
     names = NetUtils.normalizeHostNames(names);
@@ -97,12 +108,14 @@ public class CachedDNSToSwitchMapping im
       return result;
     }
 
-    List<String> uncachedHosts = this.getUncachedHosts(names);
+    List<String> uncachedHosts = getUncachedHosts(names);
 
     // Resolve the uncached hosts
     List<String> resolvedHosts = rawMapping.resolve(uncachedHosts);
-    this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
-    return this.getCachedHosts(names);
+    //cache them
+    cacheResolvedHosts(uncachedHosts, resolvedHosts);
+    //now look up the entire list in the cache
+    return getCachedHosts(names);
 
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java Wed Nov  2 05:34:31 2011
@@ -23,7 +23,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * An interface that should be implemented to allow pluggable 
+ * An interface that must be implemented to allow pluggable
  * DNS-name/IP-address to RackID resolvers.
  *
  */
@@ -40,8 +40,9 @@ public interface DNSToSwitchMapping {
    * Note the hostname/ip-address is not part of the returned path.
    * The network topology of the cluster would determine the number of
    * components in the network path.
-   * @param names
-   * @return list of resolved network paths
+   * @param names the list of hosts to resolve (can be empty)
+   * @return list of resolved network paths.
+   * If <i>names</i> is empty, the returned list is also empty
    */
   public List<String> resolve(List<String> names);
 }

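Taken together, a resolver plugs in by implementing resolve() and is usually wrapped in CachedDNSToSwitchMapping so repeated lookups are served from the cache. A minimal sketch; the single-rack resolver below is hypothetical, not part of this commit:

    /** Hypothetical resolver: every host maps to one rack. */
    class SingleRackMapping implements DNSToSwitchMapping {
      @Override
      public List<String> resolve(List<String> names) {
        List<String> racks = new ArrayList<String>(names.size());
        for (int i = 0; i < names.size(); i++) {
          racks.add("/default-rack");
        }
        return racks;
      }
    }

    // Wrap it so repeated lookups hit the cache:
    DNSToSwitchMapping mapping = new CachedDNSToSwitchMapping(new SingleRackMapping());
    List<String> racks = mapping.resolve(Arrays.asList("10.0.0.1", "10.0.0.2"));
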
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java Wed Nov  2 05:34:31 2011
@@ -150,12 +150,38 @@ public class NetUtils {
    */
   public static InetSocketAddress createSocketAddr(String target,
                                                    int defaultPort) {
+    return createSocketAddr(target, defaultPort, null);
+  }
+
+  /**
+   * Create an InetSocketAddress from the given target string and
+   * default port. If the string cannot be parsed correctly, the
+   * <code>configName</code> parameter is used as part of the
+   * exception message, allowing the user to better diagnose
+   * the misconfiguration.
+   *
+   * @param target a string of either "host" or "host:port"
+   * @param defaultPort the default port if <code>target</code> does not
+   *                    include a port number
+   * @param configName the name of the configuration from which
+   *                   <code>target</code> was loaded. This is used in the
+   *                   exception message in the case that parsing fails. 
+   */
+  public static InetSocketAddress createSocketAddr(String target,
+                                                   int defaultPort,
+                                                   String configName) {
+    String helpText = "";
+    if (configName != null) {
+      helpText = " (configuration property '" + configName + "')";
+    }
     if (target == null) {
-      throw new IllegalArgumentException("Target address cannot be null.");
+      throw new IllegalArgumentException("Target address cannot be null." +
+          helpText);
     }
     int colonIndex = target.indexOf(':');
     if (colonIndex < 0 && defaultPort == -1) {
-      throw new RuntimeException("Not a host:port pair: " + target);
+      throw new RuntimeException("Not a host:port pair: " + target +
+          helpText);
     }
     String hostname;
     int port = -1;
@@ -165,7 +191,14 @@ public class NetUtils {
       } else {
         // must be the old style <host>:<port>
         hostname = target.substring(0, colonIndex);
-        port = Integer.parseInt(target.substring(colonIndex + 1));
+        String portStr = target.substring(colonIndex + 1);
+        try {
+          port = Integer.parseInt(portStr);
+        } catch (NumberFormatException nfe) {
+          throw new IllegalArgumentException(
+              "Can't parse port '" + portStr + "'"
+              + helpText);
+        }
       }
     } else {
       // a new uri

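A sketch of the new three-argument overload; the configuration key and its value are illustrative. If the value cannot be parsed, the exception message now names the property, so the operator knows which setting to fix:

    Configuration conf = new Configuration();
    String target = conf.get("dfs.namenode.rpc-address");   // e.g. "nn1.example.com:8020"
    InetSocketAddress addr =
        NetUtils.createSocketAddr(target, 8020, "dfs.namenode.rpc-address");
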
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java Wed Nov  2 05:34:31 2011
@@ -45,8 +45,8 @@ public class NetworkTopology {
   public static final Log LOG = 
     LogFactory.getLog(NetworkTopology.class);
     
-  /* Inner Node represent a switch/router of a data center or rack.
-   * Different from a leave node, it has non-null children.
+  /** InnerNode represents a switch/router of a data center or rack.
+   * Different from a leaf node, it has non-null children.
    */
   private class InnerNode extends NodeBase {
     private ArrayList<Node> children=new ArrayList<Node>();
@@ -68,16 +68,16 @@ public class NetworkTopology {
       super(name, location, parent, level);
     }
         
-    /** Get its children */
+    /** @return its children */
     Collection<Node> getChildren() {return children;}
         
-    /** Return the number of children this node has */
+    /** @return the number of children this node has */
     int getNumOfChildren() {
       return children.size();
     }
         
     /** Judge if this node represents a rack 
-     * Return true if it has no child or its children are not InnerNodes
+     * @return true if it has no child or its children are not InnerNodes
      */ 
     boolean isRack() {
       if (children.isEmpty()) {
@@ -225,7 +225,11 @@ public class NetworkTopology {
       }
     } // end of remove
         
-    /** Given a node's string representation, return a reference to the node */ 
+    /** Given a node's string representation, return a reference to the node
+     * @param loc string location of the form /rack/node
+     * @return null if the node is not found, or if a child node on the path is
+     * present but not an instance of {@link InnerNode}
+     */
     private Node getLoc(String loc) {
       if (loc == null || loc.length() == 0) return this;
             
@@ -246,7 +250,12 @@ public class NetworkTopology {
     }
         
     /** get <i>leafIndex</i> leaf of this subtree 
-     * if it is not in the <i>excludedNode</i>*/
+     * if it is not in the <i>excludedNode</i>
+     *
+     * @param leafIndex an indexed leaf of the node
+     * @param excludedNode an excluded node (can be null)
+     * @return the <i>leafIndex</i> leaf of this subtree, or null if no such leaf exists
+     */
     private Node getLeaf(int leafIndex, Node excludedNode) {
       int count=0;
       // check if the excluded node a leaf
@@ -297,9 +306,14 @@ public class NetworkTopology {
       return numOfLeaves;
     }
   } // end of InnerNode
-    
-  InnerNode clusterMap = new InnerNode(InnerNode.ROOT); // the root
-  private int numOfRacks = 0;  // rack counter
+
+  /**
+   * the root cluster map
+   */
+  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+  /** rack counter */
+  private int numOfRacks = 0;
+  /** the lock used to manage access */
   private ReadWriteLock netlock;
     
   public NetworkTopology() {
@@ -308,8 +322,7 @@ public class NetworkTopology {
     
   /** Add a leaf node
    * Update node counter & rack counter if necessary
-   * @param node
-   *          node to be added
+   * @param node node to be added; can be null
   * @exception IllegalArgumentException if adding a node to a leaf node, 
                                         or if the node to be added is not a leaf
    */
@@ -342,9 +355,8 @@ public class NetworkTopology {
   }
     
   /** Remove a node
-   * Update node counter & rack counter if necessary
-   * @param node
-   *          node to be removed
+   * Update node counter and rack counter if necessary
+   * @param node node to be removed; can be null
    */ 
   public void remove(Node node) {
     if (node==null) return;
@@ -371,8 +383,7 @@ public class NetworkTopology {
        
   /** Check if the tree contains node <i>node</i>
    * 
-   * @param node
-   *          a node
+   * @param node a node
    * @return true if <i>node</i> is already in the tree; false otherwise
    */
   public boolean contains(Node node) {
@@ -380,10 +391,11 @@ public class NetworkTopology {
     netlock.readLock().lock();
     try {
       Node parent = node.getParent();
-      for(int level=node.getLevel(); parent!=null&&level>0;
-          parent=parent.getParent(), level--) {
-        if (parent == clusterMap)
+      for (int level = node.getLevel(); parent != null && level > 0;
+           parent = parent.getParent(), level--) {
+        if (parent == clusterMap) {
           return true;
+        }
       }
     } finally {
       netlock.readLock().unlock();
@@ -409,7 +421,7 @@ public class NetworkTopology {
     }
   }
     
-  /** Return the total number of racks */
+  /** @return the total number of racks */
   public int getNumOfRacks() {
     netlock.readLock().lock();
     try {
@@ -419,7 +431,7 @@ public class NetworkTopology {
     }
   }
     
-  /** Return the total number of nodes */
+  /** @return the total number of leaf nodes */
   public int getNumOfLeaves() {
     netlock.readLock().lock();
     try {
@@ -432,11 +444,11 @@ public class NetworkTopology {
   /** Return the distance between two nodes
    * It is assumed that the distance from one node to its parent is 1
    * The distance between two nodes is calculated by summing up their distances
-   * to their closest common  ancestor.
+   * to their closest common ancestor.
    * @param node1 one node
    * @param node2 another node
-   * @return the distance between node1 and node2
-   * node1 or node2 do not belong to the cluster
+   * @return the distance between node1 and node2 which is zero if they are the same
+   *  or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
    */
   public int getDistance(Node node1, Node node2) {
     if (node1 == node2) {
@@ -477,8 +489,8 @@ public class NetworkTopology {
   } 
     
   /** Check if two nodes are on the same rack
-   * @param node1 one node
-   * @param node2 another node
+   * @param node1 one node (can be null)
+   * @param node2 another node (can be null)
    * @return true if node1 and node2 are on the same rack; false otherwise
    * @exception IllegalArgumentException when either node1 or node2 is null, or
    * node1 or node2 do not belong to the cluster
@@ -622,6 +634,8 @@ public class NetworkTopology {
    * If neither local node or local rack node is found, put a random replica
    * location at position 0.
    * It leaves the rest nodes untouched.
+   * @param reader the node that wishes to read a block from one of the nodes
+   * @param nodes the list of nodes containing data for the reader
    */
   public void pseudoSortByDistance( Node reader, Node[] nodes ) {
     int tempIndex = 0;

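The documented distance semantics can be seen with a couple of NodeBase leaves (rack names illustrative): the same node is 0, the same rack is 2, and each extra level to the common ancestor adds 2:

    NetworkTopology topology = new NetworkTopology();
    Node h1 = new NodeBase("h1", "/d1/r1");
    Node h2 = new NodeBase("h2", "/d1/r1");
    Node h3 = new NodeBase("h3", "/d1/r2");
    topology.add(h1);
    topology.add(h2);
    topology.add(h3);
    topology.getDistance(h1, h1);    // 0 - same node
    topology.getDistance(h1, h2);    // 2 - same rack
    topology.getDistance(h1, h3);    // 4 - different racks in the same data center
    topology.isOnSameRack(h1, h2);   // true
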
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java Wed Nov  2 05:34:31 2011
@@ -33,20 +33,31 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public interface Node {
-  /** Return the string representation of this node's network location */
+  /** @return the string representation of this node's network location */
   public String getNetworkLocation();
-  /** Set the node's network location */
+
+  /** Set this node's network location
+   * @param location the location
+   */
   public void setNetworkLocation(String location);
-  /** Return this node's name */
+  /** @return this node's name */
   public String getName();
-  /** Return this node's parent */
+
+  /** @return this node's parent */
   public Node getParent();
-  /** Set this node's parent */
+
+  /** Set this node's parent
+   * @param parent the parent
+   */
   public void setParent(Node parent);
-  /** Return this node's level in the tree.
+
+  /** @return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
   public int getLevel();
-  /** Set this node's level in the tree.*/
+
+  /** Set this node's level in the tree
+   * @param i the level
+   */
   public void setLevel(int i);
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java Wed Nov  2 05:34:31 2011
@@ -27,9 +27,12 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class NodeBase implements Node {
+  /** Path separator {@value} */
   public final static char PATH_SEPARATOR = '/';
+  /** Path separator as a string {@value} */
   public final static String PATH_SEPARATOR_STR = "/";
-  public final static String ROOT = ""; // string representation of root
+  /** string representation of root {@value} */
+  public final static String ROOT = "";
   
   protected String name; //host:port#
   protected String location; //string representation of this node's location
@@ -55,7 +58,7 @@ public class NodeBase implements Node {
   }
   
   /** Construct a node from its name and its location
-   * @param name this node's name 
+   * @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
    * @param location this node's location 
    */
   public NodeBase(String name, String location) {
@@ -63,7 +66,7 @@ public class NodeBase implements Node {
   }
   
   /** Construct a node from its name and its location
-   * @param name this node's name 
+   * @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
    * @param location this node's location 
    * @param parent this node's parent node
    * @param level this node's level in the tree
@@ -74,7 +77,11 @@ public class NodeBase implements Node {
     this.level = level;
   }
 
-  /* set this node's name and location */
+  /**
+   * set this node's name and location
+   * @param name the (nullable) name, which cannot contain the {@link #PATH_SEPARATOR}
+   * @param location the location
+   */
   private void set(String name, String location) {
     if (name != null && name.contains(PATH_SEPARATOR_STR))
       throw new IllegalArgumentException(
@@ -83,27 +90,43 @@ public class NodeBase implements Node {
     this.location = location;      
   }
   
-  /** Return this node's name */
+  /** @return this node's name */
+  @Override
   public String getName() { return name; }
   
-  /** Return this node's network location */
+  /** @return this node's network location */
+  @Override
   public String getNetworkLocation() { return location; }
   
-  /** Set this node's network location */
+  /** Set this node's network location
+   * @param location the location
+   */
+  @Override
   public void setNetworkLocation(String location) { this.location = location; }
   
-  /** Return this node's path */
+  /**
+   * Get the path of a node
+   * @param node a non-null node
+   * @return the path of a node
+   */
   public static String getPath(Node node) {
     return node.getNetworkLocation()+PATH_SEPARATOR_STR+node.getName();
   }
   
-  /** Return this node's string representation */
+  /** @return this node's path as its string representation */
+  @Override
   public String toString() {
     return getPath(this);
   }
 
-  /** Normalize a path */
-  static public String normalize(String path) {
+  /** Normalize a path by stripping off any trailing {@link #PATH_SEPARATOR}
+   * @param path path to normalize.
+   * @return the normalised path
+   * If <i>path</i> is null or empty, {@link #ROOT} is returned
+   * @throws IllegalArgumentException if the first character of a non-empty path
+   * is not {@link #PATH_SEPARATOR}
+   */
+  public static String normalize(String path) {
     if (path == null || path.length() == 0) return ROOT;
     
     if (path.charAt(0) != PATH_SEPARATOR) {
@@ -119,20 +142,28 @@ public class NodeBase implements Node {
     return path;
   }
   
-  /** Return this node's parent */
+  /** @return this node's parent */
+  @Override
   public Node getParent() { return parent; }
   
-  /** Set this node's parent */
+  /** Set this node's parent
+   * @param parent the parent
+   */
+  @Override
   public void setParent(Node parent) {
     this.parent = parent;
   }
   
-  /** Return this node's level in the tree.
+  /** @return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
+  @Override
   public int getLevel() { return level; }
   
-  /** Set this node's level in the tree */
+  /** Set this node's level in the tree
+   * @param level the level
+   */
+  @Override
   public void setLevel(int level) {
     this.level = level;
   }

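The normalize() contract described in the javadoc above, in a nutshell (values illustrative):

    NodeBase.normalize(null);       // "" (ROOT)
    NodeBase.normalize("/d1/r1/");  // "/d1/r1" - trailing separator stripped
    NodeBase.normalize("d1/r1");    // throws IllegalArgumentException: missing leading '/'
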
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java Wed Nov  2 05:34:31 2011
@@ -23,16 +23,16 @@ import java.io.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.util.*;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.*;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
 /**
  * This class implements the {@link DNSToSwitchMapping} interface using a 
- * script configured via net.topology.script.file.name .
+ * script configured via the {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY}
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -42,50 +42,86 @@ implements Configurable
   public ScriptBasedMapping() {
     super(new RawScriptBasedMapping());
   }
-  
-  // script must accept at least this many args
+
+  /**
+   * Minimum number of arguments: {@value}
+   */
   static final int MIN_ALLOWABLE_ARGS = 1;
-  
+
+  /**
+   * Default number of arguments: {@value}
+   */
   static final int DEFAULT_ARG_COUNT = 
                      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT;
-  
+
+  /**
+   * key to the script filename {@value}
+   */
   static final String SCRIPT_FILENAME_KEY = 
                      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
-  static final String SCRIPT_ARG_COUNT_KEY = 
+  /**
+   * key to the argument count that the script supports
+   */
+  static final String SCRIPT_ARG_COUNT_KEY =
                      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
-  
+
+  /**
+   * Create an instance from the given configuration
+   * @param conf configuration
+   */
   public ScriptBasedMapping(Configuration conf) {
     this();
     setConf(conf);
   }
-  
+
+  @Override
   public Configuration getConf() {
     return ((RawScriptBasedMapping)rawMapping).getConf();
   }
-  
+
+  @Override
   public void setConf(Configuration conf) {
     ((RawScriptBasedMapping)rawMapping).setConf(conf);
   }
-  
+
+  /**
+   * This is the uncached script mapping that is fed into the cache managed
+   * by the superclass {@link CachedDNSToSwitchMapping}
+   */
   private static final class RawScriptBasedMapping
-  implements DNSToSwitchMapping {
-  private String scriptName;
-  private Configuration conf;
-  private int maxArgs; //max hostnames per call of the script
-  private static Log LOG = 
-    LogFactory.getLog(ScriptBasedMapping.class);
-  public void setConf (Configuration conf) {
-    this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
-    this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
-    this.conf = conf;
-  }
-  public Configuration getConf () {
-    return conf;
-  }
-  
-  public RawScriptBasedMapping() {}
-  
-  public List<String> resolve(List<String> names) {
+      implements DNSToSwitchMapping {
+    private String scriptName;
+    private Configuration conf;
+    private int maxArgs; //max hostnames per call of the script
+    private static Log LOG =
+        LogFactory.getLog(ScriptBasedMapping.class);
+
+    /**
+     * Set the configuration and extract the parameters of interest
+     * @param conf the configuration to use
+     */
+    public void setConf (Configuration conf) {
+      this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
+      this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
+      this.conf = conf;
+    }
+
+    /**
+     * Get the configuration
+     * @return the configuration
+     */
+    public Configuration getConf () {
+      return conf;
+    }
+
+    /**
+     * Constructor. The mapping is not ready to use until
+     * {@link #setConf(Configuration)} has been called
+     */
+    public RawScriptBasedMapping() {}
+
+    @Override
+    public List<String> resolve(List<String> names) {
     List <String> m = new ArrayList<String>(names.size());
     
     if (names.isEmpty()) {
@@ -109,7 +145,7 @@ implements Configurable
       
       if (m.size() != names.size()) {
         // invalid number of entries returned by the script
-        LOG.warn("Script " + scriptName + " returned "
+        LOG.error("Script " + scriptName + " returned "
             + Integer.toString(m.size()) + " values when "
             + Integer.toString(names.size()) + " were expected.");
         return null;
@@ -123,45 +159,53 @@ implements Configurable
     return m;
   }
 
-  private String runResolveCommand(List<String> args) {
-    int loopCount = 0;
-    if (args.size() == 0) {
-      return null;
-    }
-    StringBuilder allOutput = new StringBuilder();
-    int numProcessed = 0;
-    if (maxArgs < MIN_ALLOWABLE_ARGS) {
-      LOG.warn("Invalid value " + Integer.toString(maxArgs)
-          + " for " + SCRIPT_ARG_COUNT_KEY + "; must be >= "
-          + Integer.toString(MIN_ALLOWABLE_ARGS));
-      return null;
-    }
-    
-    while (numProcessed != args.size()) {
-      int start = maxArgs * loopCount;
-      List <String> cmdList = new ArrayList<String>();
-      cmdList.add(scriptName);
-      for (numProcessed = start; numProcessed < (start + maxArgs) && 
-           numProcessed < args.size(); numProcessed++) {
-        cmdList.add(args.get(numProcessed)); 
-      }
-      File dir = null;
-      String userDir;
-      if ((userDir = System.getProperty("user.dir")) != null) {
-        dir = new File(userDir);
-      }
-      ShellCommandExecutor s = new ShellCommandExecutor(
-                                   cmdList.toArray(new String[0]), dir);
-      try {
-        s.execute();
-        allOutput.append(s.getOutput() + " ");
-      } catch (Exception e) {
-        LOG.warn("Exception: ", e);
+    /**
+     * Build and execute the resolution command. The command is
+     * executed in the directory specified by the system property
+     * "user.dir" if set; otherwise the current working directory is used
+     * @param args a list of arguments
+     * @return null if the number of arguments is out of range,
+     * or the output of the command.
+     */
+    private String runResolveCommand(List<String> args) {
+      int loopCount = 0;
+      if (args.size() == 0) {
         return null;
       }
-      loopCount++; 
+      StringBuilder allOutput = new StringBuilder();
+      int numProcessed = 0;
+      if (maxArgs < MIN_ALLOWABLE_ARGS) {
+        LOG.warn("Invalid value " + Integer.toString(maxArgs)
+            + " for " + SCRIPT_ARG_COUNT_KEY + "; must be >= "
+            + Integer.toString(MIN_ALLOWABLE_ARGS));
+        return null;
+      }
+
+      while (numProcessed != args.size()) {
+        int start = maxArgs * loopCount;
+        List<String> cmdList = new ArrayList<String>();
+        cmdList.add(scriptName);
+        for (numProcessed = start; numProcessed < (start + maxArgs) &&
+            numProcessed < args.size(); numProcessed++) {
+          cmdList.add(args.get(numProcessed));
+        }
+        File dir = null;
+        String userDir;
+        if ((userDir = System.getProperty("user.dir")) != null) {
+          dir = new File(userDir);
+        }
+        ShellCommandExecutor s = new ShellCommandExecutor(
+            cmdList.toArray(new String[0]), dir);
+        try {
+          s.execute();
+          allOutput.append(s.getOutput() + " ");
+        } catch (Exception e) {
+          LOG.warn("Exception: ", e);
+          return null;
+        }
+        loopCount++;
+      }
+      return allOutput.toString();
     }
-    return allOutput.toString();
-  }
   }
 }

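Wiring the mapping up is a matter of pointing the two keys above at a topology script; the script path and argument count here are illustrative:

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
        "/etc/hadoop/topology.sh");                     // hypothetical script
    conf.setInt(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY, 100);
    DNSToSwitchMapping mapping = new ScriptBasedMapping(conf);
    List<String> racks = mapping.resolve(Arrays.asList("host1", "host2"));
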
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java Wed Nov  2 05:34:31 2011
@@ -18,6 +18,7 @@ package org.apache.hadoop.security;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
 import java.net.UnknownHostException;
@@ -34,7 +35,9 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
 
 import sun.security.jgss.krb5.Krb5Util;
@@ -313,17 +316,23 @@ public class SecurityUtil {
    * @param conf configuration object
    * @return the KerberosInfo or null if it has no KerberosInfo defined
    */
-  public static KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
-    for(SecurityInfo provider: testProviders) {
-      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
-      if (result != null) {
-        return result;
+  public static KerberosInfo 
+  getKerberosInfo(Class<?> protocol, Configuration conf) {
+    synchronized (testProviders) {
+      for(SecurityInfo provider: testProviders) {
+        KerberosInfo result = provider.getKerberosInfo(protocol, conf);
+        if (result != null) {
+          return result;
+        }
       }
     }
-    for(SecurityInfo provider: securityInfoProviders) {
-      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
-      if (result != null) {
-        return result;
+    
+    synchronized (securityInfoProviders) {
+      for(SecurityInfo provider: securityInfoProviders) {
+        KerberosInfo result = provider.getKerberosInfo(protocol, conf);
+        if (result != null) {
+          return result;
+        }
       }
     }
     return null;
@@ -337,19 +346,43 @@ public class SecurityUtil {
    * @return the TokenInfo or null if it has no KerberosInfo defined
    */
   public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
-    for(SecurityInfo provider: testProviders) {
-      TokenInfo result = provider.getTokenInfo(protocol, conf);
-      if (result != null) {
-        return result;
-      }      
-    }
-    for(SecurityInfo provider: securityInfoProviders) {
-      TokenInfo result = provider.getTokenInfo(protocol, conf);
-      if (result != null) {
-        return result;
+    synchronized (testProviders) {
+      for(SecurityInfo provider: testProviders) {
+        TokenInfo result = provider.getTokenInfo(protocol, conf);
+        if (result != null) {
+          return result;
+        }      
       }
-    } 
+    }
+    
+    synchronized (securityInfoProviders) {
+      for(SecurityInfo provider: securityInfoProviders) {
+        TokenInfo result = provider.getTokenInfo(protocol, conf);
+        if (result != null) {
+          return result;
+        }
+      } 
+    }
+    
     return null;
   }
 
+  /**
+   * Set the given token's service to the format expected by the RPC client 
+   * @param token a delegation token
+   * @param addr the socket for the rpc connection
+   */
+  public static void setTokenService(Token<?> token, InetSocketAddress addr) {
+    token.setService(buildTokenService(addr));
+  }
+  
+  /**
+   * Construct the service key for a token
+   * @param addr InetSocketAddress of remote connection with a token
+   * @return "ip:port"
+   */
+  public static Text buildTokenService(InetSocketAddress addr) {
+    String host = addr.getAddress().getHostAddress();
+    return new Text(host + ":" + addr.getPort());
+  }
 }

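A minimal sketch of the two new helpers, assuming a delegation token obtained elsewhere; the helper method itself is hypothetical, not part of the commit:

    /** Hypothetical helper: stamp a freshly fetched token with its service. */
    static void stampService(Token<?> token, InetSocketAddress addr) {
      // Record the peer as "ip:port" so the RPC client can select this token later.
      SecurityUtil.setTokenService(token, addr);
      assert token.getService().equals(SecurityUtil.buildTokenService(addr));
    }
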
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java Wed Nov  2 05:34:31 2011
@@ -635,6 +635,23 @@ public class UserGroupInformation {
   }
   
   /**
+   * Re-login a user from keytab if TGT is expired or is close to expiry.
+   * 
+   * @throws IOException
+   */
+  public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
+    if (!isSecurityEnabled()
+        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
+        || !isKeytab)
+      return;
+    KerberosTicket tgt = getTGT();
+    if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) {
+      return;
+    }
+    reloginFromKeytab();
+  }
+
+  /**
    * Re-Login a user in from a keytab file. Loads a user identity from a keytab
    * file and logs them in. They become the currently logged-in user. This
    * method assumes that {@link #loginUserFromKeytab(String, String)} had 

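The intended call pattern, sketched below, is that a long-running daemon calls this before issuing RPCs; it is cheap, and a no-op unless security is on, the login came from a keytab, and the TGT is expired or close to expiry:

    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    ugi.checkTGTAndReloginFromKeytab();   // re-logins only if the TGT needs it
    // ... carry on with ugi.doAs(...) or a plain RPC call ...
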
Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java Wed Nov  2 05:34:31 2011
@@ -22,11 +22,15 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.ServiceLoader;
 
 import org.apache.commons.codec.binary.Base64;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+  
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -40,10 +44,12 @@ import org.apache.hadoop.io.WritableUtil
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
+  public static final Log LOG = LogFactory.getLog(Token.class);
   private byte[] identifier;
   private byte[] password;
   private Text kind;
   private Text service;
+  private TokenRenewer renewer;
   
   /**
    * Construct a token given a token identifier and a secret manager for the
@@ -83,6 +89,17 @@ public class Token<T extends TokenIdenti
   }
 
   /**
+   * Clone a token.
+   * @param other the token to clone
+   */
+  public Token(Token<T> other) {
+    this.identifier = other.identifier;
+    this.password = other.password;
+    this.kind = other.kind;
+    this.service = other.service;
+  }
+
+  /**
    * Get the token identifier
    * @return the token identifier
    */
@@ -102,11 +119,22 @@ public class Token<T extends TokenIdenti
    * Get the token kind
    * @return the kind of the token
    */
-  public Text getKind() {
+  public synchronized Text getKind() {
     return kind;
   }
 
   /**
+   * Set the token kind. This is only intended to be used by services that
+   * wrap another service's token, such as HFTP wrapping HDFS.
+   * @param newKind the new kind of the token
+   */
+  @InterfaceAudience.Private
+  public synchronized void setKind(Text newKind) {
+    kind = newKind;
+    renewer = null;
+  }
+
+  /**
    * Get the service on which the token is supposed to be used
    * @return the service name
    */
@@ -244,4 +272,92 @@ public class Token<T extends TokenIdenti
     buffer.append(service.toString());
     return buffer.toString();
   }
+  
+  private static ServiceLoader<TokenRenewer> renewers =
+      ServiceLoader.load(TokenRenewer.class);
+
+  private synchronized TokenRenewer getRenewer() throws IOException {
+    if (renewer != null) {
+      return renewer;
+    }
+    renewer = TRIVIAL_RENEWER;
+    synchronized (renewers) {
+      for (TokenRenewer candidate : renewers) {
+        if (candidate.handleKind(this.kind)) {
+          renewer = candidate;
+          return renewer;
+        }
+      }
+    }
+    LOG.warn("No TokenRenewer defined for token kind " + this.kind);
+    return renewer;
+  }
+
+  /**
+   * Is this token managed so that it can be renewed or cancelled?
+   * @return true, if it can be renewed and cancelled.
+   */
+  public boolean isManaged() throws IOException {
+    return getRenewer().isManaged(this);
+  }
+
+  /**
+   * Renew this delegation token
+   * @return the new expiration time
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public long renew(Configuration conf
+                    ) throws IOException, InterruptedException {
+    return getRenewer().renew(this, conf);
+  }
+  
+  /**
+   * Cancel this delegation token
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void cancel(Configuration conf
+                     ) throws IOException, InterruptedException {
+    getRenewer().cancel(this, conf);
+  }
+  
+  /**
+   * A trivial renewer for token kinds that aren't managed. Sub-classes need
+   * to implement getKind for their token kind.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+  @InterfaceStability.Evolving
+  public static class TrivialRenewer extends TokenRenewer {
+    
+    // define the kind for this renewer
+    protected Text getKind() {
+      return null;
+    }
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return kind.equals(getKind());
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) {
+      return false;
+    }
+
+    @Override
+    public long renew(Token<?> token, Configuration conf) {
+      throw new UnsupportedOperationException("Token renewal is not supported "+
+                                              " for " + token.kind + " tokens");
+    }
+
+    @Override
+    public void cancel(Token<?> token, Configuration conf) throws IOException,
+        InterruptedException {
+      throw new UnsupportedOperationException("Token cancel is not supported " +
+          " for " + token.kind + " tokens");
+    }
+
+  }
+  private static final TokenRenewer TRIVIAL_RENEWER = new TrivialRenewer();
 }
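
The renewer plumbing above is discovered through java.util.ServiceLoader: getRenewer() walks every TokenRenewer registered under META-INF/services/org.apache.hadoop.security.token.TokenRenewer and picks the first one whose handleKind matches, falling back to the trivial renewer. A filesystem that wants token.renew()/token.cancel() to work for its kind would therefore ship something like the sketch below; the "EXAMPLE" kind and the renewal logic are placeholders, not part of this patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenRenewer;

    // Hypothetical renewer for an imaginary "EXAMPLE" token kind. It is picked
    // up by listing this class name in
    // META-INF/services/org.apache.hadoop.security.token.TokenRenewer.
    public class ExampleTokenRenewer extends TokenRenewer {
      private static final Text KIND = new Text("EXAMPLE");

      @Override
      public boolean handleKind(Text kind) {
        return KIND.equals(kind);
      }

      @Override
      public boolean isManaged(Token<?> token) {
        return true;                      // this kind supports renew and cancel
      }

      @Override
      public long renew(Token<?> token, Configuration conf)
          throws IOException, InterruptedException {
        // A real renewer would contact the issuing service; a fake expiry of
        // "24 hours from now" stands in for that here.
        return System.currentTimeMillis() + 24L * 60 * 60 * 1000;
      }

      @Override
      public void cancel(Token<?> token, Configuration conf)
          throws IOException, InterruptedException {
        // A real renewer would ask the issuing service to revoke the token.
      }
    }

Note that setKind deliberately clears the cached renewer, so a wrapping service (such as HFTP over HDFS) that rewrites the kind gets a fresh lookup on the next renew or cancel.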

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java Wed Nov  2 05:34:31 2011
@@ -210,6 +210,21 @@ extends AbstractDelegationTokenIdentifie
   }
 
   /**
+   * Verifies that the given identifier and password are valid and match.
+   * @param identifier Token identifier.
+   * @param password Password in the token.
+   * @throws InvalidToken
+   */
+  public synchronized void verifyToken(TokenIdent identifier, byte[] password)
+      throws InvalidToken {
+    byte[] storedPassword = retrievePassword(identifier);
+    if (!Arrays.equals(password, storedPassword)) {
+      throw new InvalidToken("token (" + identifier
+          + ") is invalid, password doesn't match");
+    }
+  }
+  
+  /**
    * Renew a delegation token.
    * @param token the token to renew
    * @param renewer the full principal name of the user doing the renewal
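
verifyToken gives RPC servers a single call for the common "does this password match this identifier" check that previously had to be open-coded around retrievePassword. A sketch of server-side usage; the identifier is decoded with the secret manager's own createIdentifier factory, and the generic wrapper method is illustrative:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;

    public class VerifyTokenExample {
      // Decode the identifier carried in a token and ask the secret manager to
      // confirm the password; verifyToken throws InvalidToken on a mismatch.
      public static <T extends AbstractDelegationTokenIdentifier> void verify(
          AbstractDelegationTokenSecretManager<T> mgr, Token<T> token)
          throws IOException {
        T id = mgr.createIdentifier();
        id.readFields(new DataInputStream(
            new ByteArrayInputStream(token.getIdentifier())));
        mgr.verifyToken(id, token.getPassword());
      }
    }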

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java Wed Nov  2 05:34:31 2011
@@ -44,6 +44,10 @@ public class DataChecksum implements Che
   public static final int CHECKSUM_CRC32   = 1;
   public static final int CHECKSUM_CRC32C  = 2;
   
+  private static final String[] NAMES = new String[] {
+    "NULL", "CRC32", "CRC32C"
+  };
+  
   private static final int CHECKSUM_NULL_SIZE  = 0;
   private static final int CHECKSUM_CRC32_SIZE = 4;
   private static final int CHECKSUM_CRC32C_SIZE = 4;
@@ -395,7 +399,33 @@ public class DataChecksum implements Che
     }
   }
 
-
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof DataChecksum)) {
+      return false;
+    }
+    DataChecksum o = (DataChecksum)other;
+    return o.bytesPerChecksum == this.bytesPerChecksum &&
+      o.type == this.type;
+  }
+  
+  @Override
+  public int hashCode() {
+    return (this.type + 31) * this.bytesPerChecksum;
+  }
+  
+  @Override
+  public String toString() {
+    String strType;
+    if (type < NAMES.length && type > 0) {
+      strType = NAMES[type];
+    } else {
+      strType = String.valueOf(type);
+    }
+    return "DataChecksum(type=" + strType +
+      ", chunkSize=" + bytesPerChecksum + ")";
+  }
+  
   /**
   * This just provides a dummy implementation for Checksum class
    * This is used when there is no checksum available or required for 
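
With equals and hashCode defined on (type, bytesPerChecksum), two DataChecksum instances that describe the same algorithm and chunk size now compare equal, so compatibility between a writer's and a reader's checksum configuration reduces to a plain equality test. A small sketch, assuming the existing newDataChecksum(int, int) factory in this class (not shown in this hunk):

    import org.apache.hadoop.util.DataChecksum;

    public class ChecksumCompatExample {
      public static void main(String[] args) {
        DataChecksum writer =
            DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
        DataChecksum reader =
            DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);

        System.out.println(writer.equals(reader)); // true: same type and chunk size
        System.out.println(writer);                // DataChecksum(type=CRC32, chunkSize=512)
      }
    }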

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/configure.ac
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/configure.ac?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/configure.ac (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/configure.ac Wed Nov  2 05:34:31 2011
@@ -40,6 +40,7 @@ AC_CONFIG_AUX_DIR([config])
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_HEADER([config.h])
 AC_SYS_LARGEFILE
+AC_GNU_SOURCE
 
 AM_INIT_AUTOMAKE(hadoop,1.0.0)
 
@@ -57,10 +58,8 @@ if test $JAVA_HOME != ""
 then
   JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
 fi
-ldflags_bak=$LDFLAGS
 LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
 AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
-LDFLAGS=$ldflags_bak
 AC_SUBST([JNI_LDFLAGS])
 
 # Checks for header files.
@@ -94,6 +93,12 @@ AC_CHECK_HEADERS([snappy-c.h], AC_COMPUT
 dnl Check for headers needed by the native Group resolution implementation
 AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
 
+dnl check for posix_fadvise
+AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(posix_fadvise)])
+
+dnl check for sync_file_range
+AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(sync_file_range)])
+
 # Checks for typedefs, structures, and compiler characteristics.
 AC_C_CONST
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c Wed Nov  2 05:34:31 2011
@@ -29,6 +29,7 @@
 #include <string.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <sys/syscall.h>
 #include <unistd.h>
 
 #include "org_apache_hadoop.h"
@@ -234,6 +235,81 @@ cleanup:
 }
 
 
+
+/**
+ * public static native void posix_fadvise(
+ *   FileDescriptor fd, long offset, long len, int flags);
+ */
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
+  JNIEnv *env, jclass clazz,
+  jobject fd_object, jlong offset, jlong len, jint flags)
+{
+#ifndef HAVE_POSIX_FADVISE
+  THROW(env, "java/lang/UnsupportedOperationException",
+        "fadvise support not available");
+#else
+  int fd = fd_get(env, fd_object);
+  PASS_EXCEPTIONS(env);
+
+  int err = 0;
+  if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) {
+    throw_ioe(env, err);
+  }
+#endif
+}
+
+#if defined(HAVE_SYNC_FILE_RANGE)
+#  define my_sync_file_range sync_file_range
+#elif defined(SYS_sync_file_range)
+// RHEL 5 kernels have sync_file_range support, but the glibc
+// included does not have the library function. We can
+// still call it directly, and if it's not supported by the
+// kernel, we'd get ENOSYS. See RedHat Bugzilla #518581
+static int manual_sync_file_range (int fd, __off64_t from, __off64_t to, unsigned int flags)
+{
+#ifdef __x86_64__
+  return syscall( SYS_sync_file_range, fd, from, to, flags);
+#else
+  return syscall (SYS_sync_file_range, fd,
+    __LONG_LONG_PAIR ((long) (from >> 32), (long) from),
+    __LONG_LONG_PAIR ((long) (to >> 32), (long) to),
+    flags);
+#endif
+}
+#define my_sync_file_range manual_sync_file_range
+#endif
+
+/**
+ * public static native void sync_file_range(
+ *   FileDescriptor fd, long offset, long len, int flags);
+ */
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
+  JNIEnv *env, jclass clazz,
+  jobject fd_object, jlong offset, jlong len, jint flags)
+{
+#ifndef my_sync_file_range
+  THROW(env, "java/lang/UnsupportedOperationException",
+        "sync_file_range support not available");
+#else
+  int fd = fd_get(env, fd_object);
+  PASS_EXCEPTIONS(env);
+
+  if (my_sync_file_range(fd, (off_t)offset, (off_t)len, flags)) {
+    if (errno == ENOSYS) {
+      // we know the syscall number, but it's not compiled
+      // into the running kernel
+      THROW(env, "java/lang/UnsupportedOperationException",
+            "sync_file_range kernel support not available");
+      return;
+    } else {
+      throw_ioe(env, errno);
+    }
+  }
+#endif
+}
+
 /*
  * public static native FileDescriptor open(String path, int flags, int mode);
  */
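
The two JNI functions above back new static natives on org.apache.hadoop.io.nativeio.NativeIO with the Java signatures quoted in their comments (FileDescriptor, offset, length, flags). A hedged Java-side sketch of dropping cached pages after a sequential read; the POSIX_FADV_DONTNEED value is the Linux constant (4) and is defined locally because this patch does not show whether NativeIO exposes its own flag constants:

    import java.io.FileDescriptor;
    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.nativeio.NativeIO;

    public class FadviseExample {
      // Linux value of POSIX_FADV_DONTNEED; defined locally as an assumption.
      private static final int POSIX_FADV_DONTNEED = 4;

      public static void dropCache(String path) throws IOException {
        FileInputStream in = new FileInputStream(path);
        try {
          FileDescriptor fd = in.getFD();
          // ... read the file sequentially ...
          // offset 0 and len 0 advise on the whole file; throws
          // UnsupportedOperationException where fadvise was not compiled in.
          NativeIO.posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
        } finally {
          in.close();
        }
      }
    }

The sync_file_range native follows the same pattern; on kernels that expose the syscall number but lack the glibc wrapper it is issued directly via syscall(2), and ENOSYS from an older kernel is surfaced as UnsupportedOperationException rather than an IOException.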

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c Wed Nov  2 05:34:31 2011
@@ -54,6 +54,11 @@ void fd_deinit(JNIEnv *env) {
  * underlying fd, or throw if unavailable
  */
 int fd_get(JNIEnv* env, jobject obj) {
+  if (obj == NULL) {
+    THROW(env, "java/lang/NullPointerException",
+          "FileDescriptor object is null");
+    return -1;
+  }
   return (*env)->GetIntField(env, obj, fd_descriptor);
 }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c Wed Nov  2 05:34:31 2011
@@ -124,6 +124,11 @@ JNIEXPORT void JNICALL Java_org_apache_h
       "bad offsets or lengths");
     return;
   }
+  if (unlikely(bytes_per_checksum <= 0)) {
+    THROW(env, "java/lang/IllegalArgumentException",
+      "invalid bytes_per_checksum");
+    return;
+  }
 
   uint32_t *sums = (uint32_t *)(sums_addr + sums_offset);
   uint8_t *data = data_addr + data_offset;


