hadoop-common-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1354832 [2/2] - in /hadoop/common/branches/HDFS-3092/hadoop-common-project: hadoop-annotations/src/main/java/org/apache/hadoop/classification/ hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/ hadoop-auth/src/main...
Date: Thu, 28 Jun 2012 07:00:16 GMT
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java Thu Jun 28 06:59:38 2012
@@ -60,6 +60,12 @@ public interface RetryPolicy {
       this.reason = reason;
     }
     
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "(action=" + action
+          + ", delayMillis=" + delayMillis + ", reason=" + reason + ")";
+    }
+    
     public enum RetryDecision {
       FAIL,
       RETRY,

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java Thu Jun 28 06:59:38 2012
@@ -75,9 +75,10 @@ public class RetryProxy {
    */
   public static Object create(Class<?> iface, Object implementation,
                               Map<String,RetryPolicy> methodNameToPolicyMap) {
-    return RetryProxy.create(iface,
+    return create(iface,
         new DefaultFailoverProxyProvider(iface, implementation),
-        methodNameToPolicyMap);
+        methodNameToPolicyMap,
+        RetryPolicies.TRY_ONCE_THEN_FAIL);
   }
 
   /**
@@ -92,11 +93,13 @@ public class RetryProxy {
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
-      Map<String,RetryPolicy> methodNameToPolicyMap) {
+      Map<String,RetryPolicy> methodNameToPolicyMap,
+      RetryPolicy defaultPolicy) {
     return Proxy.newProxyInstance(
         proxyProvider.getInterface().getClassLoader(),
         new Class<?>[] { iface },
-        new RetryInvocationHandler(proxyProvider, methodNameToPolicyMap)
+        new RetryInvocationHandler(proxyProvider, defaultPolicy,
+            methodNameToPolicyMap)
         );
   }
 }
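
For illustration, a minimal sketch of the retained two-argument entry point, which after this change falls back to RetryPolicies.TRY_ONCE_THEN_FAIL for methods missing from the map; the LockManager interface, its implementation, and the sleep values are hypothetical:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    // Hypothetical protocol interface, for illustration only.
    interface LockManager {
      void lock(String resource) throws IOException;
    }

    Map<String, RetryPolicy> policyMap = new HashMap<String, RetryPolicy>();
    // Retry lock() up to 5 times, sleeping 2 seconds between attempts.
    policyMap.put("lock", RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        5, 2, TimeUnit.SECONDS));

    // Methods absent from policyMap now get TRY_ONCE_THEN_FAIL by default.
    // lockManagerImpl is a hypothetical LockManager instance.
    LockManager proxy = (LockManager) RetryProxy.create(
        LockManager.class, lockManagerImpl, policyMap);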

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java Thu Jun 28 06:59:38 2012
@@ -18,47 +18,51 @@
 
 package org.apache.hadoop.ipc;
 
-import java.net.InetAddress;
-import java.net.Socket;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.io.IOException;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.FilterInputStream;
+import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.io.OutputStream;
-
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Hashtable;
 import java.util.Iterator;
+import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
-import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SaslRpcClient;
@@ -67,8 +71,8 @@ import org.apache.hadoop.security.Securi
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -80,8 +84,8 @@ import org.apache.hadoop.util.Reflection
  */
 public class Client {
   
-  public static final Log LOG =
-    LogFactory.getLog(Client.class);
+  public static final Log LOG = LogFactory.getLog(Client.class);
+
   private Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
@@ -228,8 +232,7 @@ public class Client {
     private int rpcTimeout;
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private int maxRetries; //the max. no. of retries for socket connections
-    // the max. no. of retries for socket connections on time out exceptions
+    private final RetryPolicy connectionRetryPolicy;
     private int maxRetriesOnSocketTimeouts;
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean doPing; //do we need to send ping message
@@ -253,7 +256,7 @@ public class Client {
       }
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
-      this.maxRetries = remoteId.getMaxRetries();
+      this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
@@ -488,7 +491,7 @@ public class Client {
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
           }
-          handleConnectionFailure(ioFailures++, maxRetries, ie);
+          handleConnectionFailure(ioFailures++, ie);
         }
       }
     }
@@ -680,8 +683,36 @@ public class Client {
         Thread.sleep(1000);
       } catch (InterruptedException ignored) {}
       
-      LOG.info("Retrying connect to server: " + server + 
-          ". Already tried " + curRetries + " time(s).");
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); maxRetries=" + maxRetries);
+    }
+
+    private void handleConnectionFailure(int curRetries, IOException ioe
+        ) throws IOException {
+      closeConnection();
+
+      final RetryAction action;
+      try {
+        action = connectionRetryPolicy.shouldRetry(ioe, curRetries, 0, true);
+      } catch(Exception e) {
+        throw e instanceof IOException? (IOException)e: new IOException(e);
+      }
+      if (action.action == RetryAction.RetryDecision.FAIL) {
+        if (action.reason != null) {
+          LOG.warn("Failed to connect to server: " + server + ": "
+              + action.reason, ioe);
+        }
+        throw ioe;
+      }
+
+      try {
+        Thread.sleep(action.delayMillis);
+      } catch (InterruptedException e) {
+        throw (IOException)new InterruptedIOException("Interrupted: action="
+            + action + ", retry policy=" + connectionRetryPolicy).initCause(e);
+      }
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); retry policy is " + connectionRetryPolicy);
     }
 
     /**
@@ -849,6 +880,10 @@ public class Client {
       try {
         RpcResponseHeaderProto response = 
             RpcResponseHeaderProto.parseDelimitedFrom(in);
+        if (response == null) {
+          throw new IOException("Response is null.");
+        }
+
         int callId = response.getCallId();
         if (LOG.isDebugEnabled())
           LOG.debug(getName() + " got value #" + callId);
@@ -1287,7 +1322,7 @@ public class Client {
     private final String serverPrincipal;
     private final int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private final int maxRetries; //the max. no. of retries for socket connections
+    private final RetryPolicy connectionRetryPolicy;
     // the max. no. of retries for socket connections on time out exceptions
     private final int maxRetriesOnSocketTimeouts;
     private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -1297,7 +1332,7 @@ public class Client {
     ConnectionId(InetSocketAddress address, Class<?> protocol, 
                  UserGroupInformation ticket, int rpcTimeout,
                  String serverPrincipal, int maxIdleTime, 
-                 int maxRetries, int maxRetriesOnSocketTimeouts,
+                 RetryPolicy connectionRetryPolicy, int maxRetriesOnSocketTimeouts,
                  boolean tcpNoDelay, boolean doPing, int pingInterval) {
       this.protocol = protocol;
       this.address = address;
@@ -1305,7 +1340,7 @@ public class Client {
       this.rpcTimeout = rpcTimeout;
       this.serverPrincipal = serverPrincipal;
       this.maxIdleTime = maxIdleTime;
-      this.maxRetries = maxRetries;
+      this.connectionRetryPolicy = connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
       this.tcpNoDelay = tcpNoDelay;
       this.doPing = doPing;
@@ -1336,10 +1371,6 @@ public class Client {
       return maxIdleTime;
     }
     
-    int getMaxRetries() {
-      return maxRetries;
-    }
-    
     /** max connection retries on socket time outs */
     public int getMaxRetriesOnSocketTimeouts() {
       return maxRetriesOnSocketTimeouts;
@@ -1357,6 +1388,12 @@ public class Client {
       return pingInterval;
     }
     
+    static ConnectionId getConnectionId(InetSocketAddress addr,
+        Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
+        Configuration conf) throws IOException {
+      return getConnectionId(addr, protocol, ticket, rpcTimeout, null, conf);
+    }
+
     /**
      * Returns a ConnectionId object. 
      * @param addr Remote address for the connection.
@@ -1367,9 +1404,18 @@ public class Client {
      * @return A ConnectionId instance
      * @throws IOException
      */
-    public static ConnectionId getConnectionId(InetSocketAddress addr,
+    static ConnectionId getConnectionId(InetSocketAddress addr,
         Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
-        Configuration conf) throws IOException {
+        RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {
+
+      if (connectionRetryPolicy == null) {
+        final int max = conf.getInt(
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
+        connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+            max, 1, TimeUnit.SECONDS);
+      }
+
       String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
       boolean doPing =
         conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
@@ -1377,8 +1423,7 @@ public class Client {
           rpcTimeout, remotePrincipal,
           conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
-          conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
-              CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
+          connectionRetryPolicy,
           conf.getInt(
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
@@ -1421,7 +1466,7 @@ public class Client {
         return isEqual(this.address, that.address)
             && this.doPing == that.doPing
             && this.maxIdleTime == that.maxIdleTime
-            && this.maxRetries == that.maxRetries
+            && isEqual(this.connectionRetryPolicy, that.connectionRetryPolicy)
             && this.pingInterval == that.pingInterval
             && isEqual(this.protocol, that.protocol)
             && this.rpcTimeout == that.rpcTimeout
@@ -1434,11 +1479,10 @@ public class Client {
     
     @Override
     public int hashCode() {
-      int result = 1;
+      int result = connectionRetryPolicy.hashCode();
       result = PRIME * result + ((address == null) ? 0 : address.hashCode());
       result = PRIME * result + (doPing ? 1231 : 1237);
       result = PRIME * result + maxIdleTime;
-      result = PRIME * result + maxRetries;
       result = PRIME * result + pingInterval;
       result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
       result = PRIME * result + rpcTimeout;
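
Taken together, the Client.java changes replace the bare maxRetries counter with a per-connection RetryPolicy; when the caller passes null, getConnectionId builds the equivalent of the old fixed-count behavior. A standalone sketch of that default, matching the code above:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    Configuration conf = new Configuration();
    int max = conf.getInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
    // Same policy the diff installs when connectionRetryPolicy == null:
    // retry up to 'max' times with a fixed one-second sleep in between.
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(max, 1, TimeUnit.SECONDS);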

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java Thu Jun 28 06:59:38 2012
@@ -36,9 +36,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputOutputStream;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-
 import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -66,15 +66,24 @@ public class ProtobufRpcEngine implement
 
   private static final ClientCache CLIENTS = new ClientCache();
 
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout) throws IOException {
+    return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+        rpcTimeout, null);
+  }
+
   @Override
   @SuppressWarnings("unchecked")
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
       InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-      SocketFactory factory, int rpcTimeout) throws IOException {
+      SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+      ) throws IOException {
 
-    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
-        .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
-        addr, ticket, conf, factory, rpcTimeout)), false);
+    final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+        rpcTimeout, connectionRetryPolicy);
+    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
+        protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
   }
   
   @Override
@@ -97,11 +106,12 @@ public class ProtobufRpcEngine implement
     private final long clientProtocolVersion;
     private final String protocolName;
 
-    public Invoker(Class<?> protocol, InetSocketAddress addr,
+    private Invoker(Class<?> protocol, InetSocketAddress addr,
         UserGroupInformation ticket, Configuration conf, SocketFactory factory,
-        int rpcTimeout) throws IOException {
-      this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
-          ticket, rpcTimeout, conf), conf, factory);
+        int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException {
+      this(protocol, Client.ConnectionId.getConnectionId(
+          addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+          conf, factory);
     }
     
     /**

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java Thu Jun 28 06:59:38 2012
@@ -41,6 +41,7 @@ import org.apache.commons.logging.*;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.net.NetUtils;
@@ -326,7 +327,7 @@ public class RPC {
                              long clientVersion,
                              InetSocketAddress addr, Configuration conf,
                              long connTimeout) throws IOException { 
-    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, connTimeout);
+    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
   }
   
   /**
@@ -347,7 +348,7 @@ public class RPC {
                              int rpcTimeout,
                              long timeout) throws IOException {
     return waitForProtocolProxy(protocol, clientVersion, addr,
-        conf, rpcTimeout, timeout).getProxy();
+        conf, rpcTimeout, null, timeout).getProxy();
   }
 
   /**
@@ -367,6 +368,7 @@ public class RPC {
                                long clientVersion,
                                InetSocketAddress addr, Configuration conf,
                                int rpcTimeout,
+                               RetryPolicy connectionRetryPolicy,
                                long timeout) throws IOException { 
     long startTime = System.currentTimeMillis();
     IOException ioe;
@@ -374,7 +376,7 @@ public class RPC {
       try {
         return getProtocolProxy(protocol, clientVersion, addr, 
             UserGroupInformation.getCurrentUser(), conf, NetUtils
-            .getDefaultSocketFactory(conf), rpcTimeout);
+            .getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
       } catch(ConnectException se) {  // namenode has not been started
         LOG.info("Server at " + addr + " not available yet, Zzzzz...");
         ioe = se;
@@ -463,7 +465,7 @@ public class RPC {
                                 Configuration conf,
                                 SocketFactory factory) throws IOException {
     return getProtocolProxy(
-        protocol, clientVersion, addr, ticket, conf, factory, 0);
+        protocol, clientVersion, addr, ticket, conf, factory, 0, null);
   }
   
   /**
@@ -489,7 +491,7 @@ public class RPC {
                                 SocketFactory factory,
                                 int rpcTimeout) throws IOException {
     return getProtocolProxy(protocol, clientVersion, addr, ticket,
-             conf, factory, rpcTimeout).getProxy();
+             conf, factory, rpcTimeout, null).getProxy();
   }
   
   /**
@@ -512,12 +514,13 @@ public class RPC {
                                 UserGroupInformation ticket,
                                 Configuration conf,
                                 SocketFactory factory,
-                                int rpcTimeout) throws IOException {    
+                                int rpcTimeout,
+                                RetryPolicy connectionRetryPolicy) throws IOException {    
     if (UserGroupInformation.isSecurityEnabled()) {
       SaslRpcServer.init(conf);
     }
-    return getProtocolEngine(protocol,conf).getProxy(protocol,
-        clientVersion, addr, ticket, conf, factory, rpcTimeout);
+    return getProtocolEngine(protocol,conf).getProxy(protocol, clientVersion,
+        addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy);
   }
 
    /**

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java Thu Jun 28 06:59:38 2012
@@ -97,8 +97,9 @@ public class RemoteException extends IOE
     return new RemoteException(attrs.getValue("class"),
         attrs.getValue("message")); 
   }
-  
+
+  @Override
   public String toString() {
-    return className + ": " + getMessage();
+    return getClass().getName() + "(" + className + "): " + getMessage();
   }
 }
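
A quick before/after illustration of the new toString(); the wrapped class name and message here are made up:

    import org.apache.hadoop.ipc.RemoteException;

    RemoteException re = new RemoteException(
        "java.io.FileNotFoundException", "File /tmp/x does not exist");
    // Before: java.io.FileNotFoundException: File /tmp/x does not exist
    // After:  org.apache.hadoop.ipc.RemoteException(java.io.FileNotFoundException): File /tmp/x does not exist
    System.out.println(re);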

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java Thu Jun 28 06:59:38 2012
@@ -26,6 +26,7 @@ import javax.net.SocketFactory;
 
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -40,7 +41,8 @@ public interface RpcEngine {
   <T> ProtocolProxy<T> getProxy(Class<T> protocol,
                   long clientVersion, InetSocketAddress addr,
                   UserGroupInformation ticket, Configuration conf,
-                  SocketFactory factory, int rpcTimeout) throws IOException;
+                  SocketFactory factory, int rpcTimeout,
+                  RetryPolicy connectionRetryPolicy) throws IOException;
 
   /** Expert: Make multiple, parallel calls to a set of servers. */
   Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs,

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java Thu Jun 28 06:59:38 2012
@@ -31,6 +31,7 @@ import javax.net.SocketFactory;
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -259,9 +260,14 @@ public class WritableRpcEngine implement
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
                          InetSocketAddress addr, UserGroupInformation ticket,
                          Configuration conf, SocketFactory factory,
-                         int rpcTimeout)
+                         int rpcTimeout, RetryPolicy connectionRetryPolicy)
     throws IOException {    
 
+    if (connectionRetryPolicy != null) {
+      throw new UnsupportedOperationException(
+          "Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
+    }
+
     T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
         new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf,
             factory, rpcTimeout));

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java Thu Jun 28 06:59:38 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -55,8 +56,8 @@ public class NetworkTopology {
   /** InnerNode represents a switch/router of a data center or rack.
    * Different from a leaf node, it has non-null children.
    */
-  private class InnerNode extends NodeBase {
-    private ArrayList<Node> children=new ArrayList<Node>();
+  static class InnerNode extends NodeBase {
+    protected List<Node> children=new ArrayList<Node>();
     private int numOfLeaves;
         
     /** Construct an InnerNode from a path-like string */
@@ -76,7 +77,7 @@ public class NetworkTopology {
     }
         
     /** @return its children */
-    Collection<Node> getChildren() {return children;}
+    List<Node> getChildren() {return children;}
         
     /** @return the number of children this node has */
     int getNumOfChildren() {
@@ -182,7 +183,23 @@ public class NetworkTopology {
         }
       }
     }
-        
+
+    /**
+     * Creates a parent node to be added to the list of children. Uses the
+     * InnerNode four-argument constructor, specifying the name, location,
+     * parent, and level of this node.
+     * 
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #add(Node)} method.
+     * 
+     * @param parentName The name of the parent node
+     * @return A new inner node
+     * @see InnerNode#InnerNode(String, String, InnerNode, int)
+     */
+    protected InnerNode createParentNode(String parentName) {
+      return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
+    }
+
     /** Remove node <i>n</i> from the subtree of this node
      * @param n node to be deleted 
      * @return true if the node is deleted; false otherwise
@@ -263,7 +280,7 @@ public class NetworkTopology {
      * @param excludedNode an excluded node (can be null)
      * @return
      */
-    private Node getLeaf(int leafIndex, Node excludedNode) {
+    Node getLeaf(int leafIndex, Node excludedNode) {
       int count=0;
       // check if the excluded node a leaf
       boolean isLeaf =
@@ -308,7 +325,21 @@ public class NetworkTopology {
         return null;
       }
     }
-        
+    
+    /**
+     * Determine if children are leaves; the default implementation calls {@link #isRack()}.
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #getLeaf(int, Node)} method.
+     * 
+     * @return true if children are leaves, false otherwise
+     */
+    protected boolean areChildrenLeaves() {
+      return isRack();
+    }
+
+    /**
+     * Get number of leaves.
+     */
     int getNumOfLeaves() {
       return numOfLeaves;
     }
@@ -317,18 +348,18 @@ public class NetworkTopology {
   /**
    * the root cluster map
    */
-  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+  InnerNode clusterMap;
   /** Depth of all leaf nodes */
   private int depthOfAllLeaves = -1;
   /** rack counter */
-  private int numOfRacks = 0;
+  protected int numOfRacks = 0;
   /** the lock used to manage access */
-  private ReadWriteLock netlock;
-    
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+
   public NetworkTopology() {
-    netlock = new ReentrantReadWriteLock();
+    clusterMap = new InnerNode(InnerNode.ROOT);
   }
-    
+
   /** Add a leaf node
    * Update node counter & rack counter if necessary
    * @param node node to be added; can be null
@@ -344,7 +375,7 @@ public class NetworkTopology {
     }
     netlock.writeLock().lock();
     try {
-      Node rack = getNode(node.getNetworkLocation());
+      Node rack = getNodeForNetworkLocation(node);
       if (rack != null && !(rack instanceof InnerNode)) {
         throw new IllegalArgumentException("Unexpected data node " 
                                            + node.toString() 
@@ -376,7 +407,26 @@ public class NetworkTopology {
       netlock.writeLock().unlock();
     }
   }
-    
+  
+  /**
+   * Return a reference to the node given its string representation.
+   * Default implementation delegates to {@link #getNode(String)}.
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as an alternative to overriding the full
+   * {@link #add(Node)} method.
+   * 
+   * @param node the node whose network-location string is used to retrieve
+   * the corresponding Node object
+   * @return a reference to the node; null if the node is not in the tree
+   * 
+   * @see #add(Node)
+   * @see #getNode(String)
+   */
+  protected Node getNodeForNetworkLocation(Node node) {
+    return getNode(node.getNetworkLocation());
+  }
+  
   /** Remove a node
    * Update node counter and rack counter if necessary
    * @param node node to be removed; can be null
@@ -403,7 +453,7 @@ public class NetworkTopology {
       netlock.writeLock().unlock();
     }
   }
-       
+
   /** Check if the tree contains node <i>node</i>
    * 
    * @param node a node
@@ -443,7 +493,21 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+  
+  /** Return the rack to which a given network location belongs;
+   *  the default implementation returns the location itself.
+   * 
+   * To be overridden in subclasses for specific NetworkTopology 
+   * implementations that derive the rack from the rest of the 
+   * network location string.
+   * @param loc
+   *          a path-like string representation of a network location
+   * @return a rack string
+   */
+  public String getRack(String loc) {
+    return loc;
+  }
+  
   /** @return the total number of racks */
   public int getNumOfRacks() {
     netlock.readLock().lock();
@@ -453,7 +517,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** @return the total number of leaf nodes */
   public int getNumOfLeaves() {
     netlock.readLock().lock();
@@ -463,7 +527,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** Return the distance between two nodes
    * It is assumed that the distance from one node to its parent is 1
    * The distance between two nodes is calculated by summing up their distances
@@ -509,8 +573,8 @@ public class NetworkTopology {
       return Integer.MAX_VALUE;
     }
     return dis+2;
-  } 
-    
+  }
+
   /** Check if two nodes are on the same rack
    * @param node1 one node (can be null)
    * @param node2 another node (can be null)
@@ -525,13 +589,44 @@ public class NetworkTopology {
       
     netlock.readLock().lock();
     try {
-      return node1.getParent()==node2.getParent();
+      return isSameParents(node1, node2);
     } finally {
       netlock.readLock().unlock();
     }
   }
-    
-  final private static Random r = new Random();
+  
+  /**
+   * Check if the network topology is aware of NodeGroup.
+   */
+  public boolean isNodeGroupAware() {
+    return false;
+  }
+  
+  /** 
+   * Returns false directly, as this topology is not NodeGroup-aware; to be overridden in subclasses.
+   */
+  public boolean isOnSameNodeGroup(Node node1, Node node2) {
+    return false;
+  }
+
+  /**
+   * Compare the parents of each node for equality
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as alternative to overriding the full 
+   * {@link #isOnSameRack(Node, Node)} method.
+   * 
+   * @param node1 the first node to compare
+   * @param node2 the second node to compare
+   * @return true if their parents are equal, false otherwise
+   * 
+   * @see #isOnSameRack(Node, Node)
+   */
+  protected boolean isSameParents(Node node1, Node node2) {
+    return node1.getParent()==node2.getParent();
+  }
+
+  final protected static Random r = new Random();
   /** randomly choose one node from <i>scope</i>
    * if scope starts with ~, choose one from the all nodes except for the
    * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
@@ -550,7 +645,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   private Node chooseRandom(String scope, String excludedScope){
     if (excludedScope != null) {
       if (scope.startsWith(excludedScope)) {
@@ -579,7 +674,25 @@ public class NetworkTopology {
     int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
-       
+
+  /** return leaves in <i>scope</i>
+   * @param scope a path string
+   * @return leaf nodes under the specific scope
+   */
+  public List<Node> getLeaves(String scope) {
+    Node node = getNode(scope);
+    List<Node> leafNodes = new ArrayList<Node>();
+    if (!(node instanceof InnerNode)) {
+      leafNodes.add(node);
+    } else {
+      InnerNode innerNode = (InnerNode) node;
+      for (int i=0;i<innerNode.getNumOfLeaves();i++) {
+        leafNodes.add(innerNode.getLeaf(i, null));
+      }
+    }
+    return leafNodes;
+  }
+
   /** return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>
    * if scope starts with ~, return the number of nodes that are not
    * in <i>scope</i> and <i>excludedNodes</i>; 
@@ -619,7 +732,7 @@ public class NetworkTopology {
       netlock.readLock().unlock();
     }
   }
-    
+
   /** convert a network tree to a string */
   public String toString() {
     // print the number of racks
@@ -640,13 +753,12 @@ public class NetworkTopology {
     return tree.toString();
   }
 
-  /* swap two array items */
-  static private void swap(Node[] nodes, int i, int j) {
+  /** swap two array items */
+  static protected void swap(Node[] nodes, int i, int j) {
     Node tempNode;
     tempNode = nodes[j];
     nodes[j] = nodes[i];
     nodes[i] = tempNode;
-    
   }
   
   /** Sort nodes array by their distances to <i>reader</i>
@@ -697,4 +809,5 @@ public class NetworkTopology {
       swap(nodes, 0, r.nextInt(nodes.length));
     }
   }
+  
 }
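
The widened visibility (static InnerNode, protected numOfRacks/netlock/swap) and the new hooks — createParentNode, getNodeForNetworkLocation, isSameParents, areChildrenLeaves — let subclasses specialize the topology without copying whole methods. A hypothetical minimal subclass, purely illustrative of the hook points:

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;

    // Hypothetical sketch: a topology that reports itself NodeGroup-aware
    // and treats nodes as rack-local when their grandparents match.
    public class ExampleNodeGroupTopology extends NetworkTopology {
      @Override
      public boolean isNodeGroupAware() {
        return true;
      }

      @Override
      protected boolean isSameParents(Node node1, Node node2) {
        // One extra level: the parent is the node group, its parent the rack.
        Node r1 = node1.getParent() == null ? null : node1.getParent().getParent();
        Node r2 = node2.getParent() == null ? null : node2.getParent().getParent();
        return r1 != null && r1 == r2;
      }
    }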

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java Thu Jun 28 06:59:38 2012
@@ -31,6 +31,8 @@ import org.apache.hadoop.security.Hadoop
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public abstract class AbstractDelegationTokenIdentifier 
@@ -173,16 +175,17 @@ extends TokenIdentifier {
 	throw new IOException("Unknown version of delegation token " + 
                               version);
     }
-    owner.readFields(in);
-    renewer.readFields(in);
-    realUser.readFields(in);
+    owner.readFields(in, Text.DEFAULT_MAX_LEN);
+    renewer.readFields(in, Text.DEFAULT_MAX_LEN);
+    realUser.readFields(in, Text.DEFAULT_MAX_LEN);
     issueDate = WritableUtils.readVLong(in);
     maxDate = WritableUtils.readVLong(in);
     sequenceNumber = WritableUtils.readVInt(in);
     masterKeyId = WritableUtils.readVInt(in);
   }
 
-  public void write(DataOutput out) throws IOException {
+  @VisibleForTesting
+  void writeImpl(DataOutput out) throws IOException {
     out.writeByte(VERSION);
     owner.write(out);
     renewer.write(out);
@@ -193,6 +196,19 @@ extends TokenIdentifier {
     WritableUtils.writeVInt(out, masterKeyId);
   }
   
+  public void write(DataOutput out) throws IOException {
+    if (owner.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("owner is too long to be serialized!");
+    }
+    if (renewer.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("renewer is too long to be serialized!");
+    }
+    if (realUser.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("realuser is too long to be serialized!");
+    }
+    writeImpl(out);
+  }
+  
   public String toString() {
     StringBuilder buffer = new StringBuilder();
     buffer
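
The write/read paths are now symmetric: readFields bounds each Text field at Text.DEFAULT_MAX_LEN, and write refuses to emit anything readFields would later reject. A sketch of the guard in action; 'identifier' stands for any concrete subclass instance and is hypothetical here:

    import java.io.IOException;

    import org.apache.hadoop.io.DataOutputBuffer;

    DataOutputBuffer out = new DataOutputBuffer();
    try {
      // Throws before writing any bytes if owner, renewer, or realUser
      // exceeds Text.DEFAULT_MAX_LEN.
      identifier.write(out);
    } catch (IOException e) {
      // e.g. "owner is too long to be serialized!"
    }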

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java Thu Jun 28 06:59:38 2012
@@ -30,7 +30,7 @@ import java.util.concurrent.atomic.Atomi
 
 /**
  * The <code>ShutdownHookManager</code> enables running shutdownHook
- * in a determistic order, higher priority first.
+ * in a deterministic order, higher priority first.
  * <p/>
  * The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
  * This class registers a single JVM shutdownHook and run all the
@@ -169,7 +169,7 @@ public class ShutdownHookManager {
   }
 
   /**
-   * Indicates if a shutdownHook is registered or nt.
+   * Indicates if a shutdownHook is registered or not.
    *
    * @param shutdownHook shutdownHook to check if registered.
    * @return TRUE/FALSE depending on whether the shutdownHook is registered.
@@ -177,5 +177,14 @@ public class ShutdownHookManager {
   public boolean hasShutdownHook(Runnable shutdownHook) {
     return hooks.contains(new HookEntry(shutdownHook, 0));
   }
+  
+  /**
+   * Indicates if shutdown is in progress or not.
+   * 
+   * @return TRUE if the shutdown is in progress, otherwise FALSE.
+   */
+  public boolean isShutdownInProgress() {
+    return shutdownInProgress.get();
+  }
 
 }
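
A brief usage sketch for the new query; the hook body and priority are illustrative:

    import org.apache.hadoop.util.ShutdownHookManager;

    ShutdownHookManager mgr = ShutdownHookManager.get();
    mgr.addShutdownHook(new Runnable() {
      @Override
      public void run() {
        // flush and close resources here
      }
    }, 10);  // higher priority runs first

    // Long-running code can now skip work that is pointless once
    // shutdown has begun:
    if (!mgr.isShutdownInProgress()) {
      // safe to schedule new background work
    }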

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c Thu Jun 28 06:59:38 2012
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c Thu Jun 28 06:59:38 2012
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c Thu Jun 28 06:59:38 2012
@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
@@ -123,5 +99,3 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   return (jint)compressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c Thu Jun 28 06:59:38 2012
@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
 
@@ -127,5 +103,3 @@ JNIEXPORT jint JNICALL Java_org_apache_h
 
   return (jint)uncompressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h Thu Jun 28 06:59:38 2012
@@ -17,42 +17,13 @@
  */
 
 
-#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+#ifndef ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 
-
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-  #if defined HAVE_STDDEF_H
-    #include <stddef.h>
-  #else
-    #error 'stddef.h not found'
-  #endif
-
-  #if defined HAVE_SNAPPY_C_H
-    #include <snappy-c.h>
-  #else
-    #error 'Please install snappy-development packages for your platform.'
-  #endif
-
-  #if defined HAVE_DLFCN_H
-    #include <dlfcn.h>
-  #else
-    #error "dlfcn.h not found"
-  #endif
-
-  #if defined HAVE_JNI_H
-    #include <jni.h>
-  #else
-    #error 'jni.h not found'
-  #endif
-
-  #include "org_apache_hadoop.h"
-
-#endif //define HADOOP_SNAPPY_LIBRARY
+#include "org_apache_hadoop.h"
+#include <dlfcn.h>
+#include <jni.h>
+#include <snappy-c.h>
+#include <stddef.h>
 
 #endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c Thu Jun 28 06:59:38 2012
@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c Thu Jun 28 06:59:38 2012
@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h Thu Jun 28 06:59:38 2012
@@ -19,40 +19,13 @@
 #if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDDEF_H
-  #include <stddef.h>
-#else
-  #error 'stddef.h not found'
-#endif
-    
-#if defined HAVE_ZLIB_H
-  #include <zlib.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-    
-#if defined HAVE_ZCONF_H
-  #include <zconf.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
-
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
+#include <dlfcn.h>
+#include <jni.h>
+#include <stddef.h>
+#include <zconf.h>
+#include <zlib.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 
 /* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c Thu Jun 28 06:59:38 2012
@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -32,6 +29,7 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_nativeio_NativeIO.h"
 #include "file_descriptor.h"

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c Thu Jun 28 06:59:38 2012
@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <arpa/inet.h>
 #include <assert.h>
 #include <stdlib.h>
@@ -26,6 +23,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_util_NativeCrc32.h"
 #include "gcc_optimizations.h"

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h Thu Jun 28 06:59:38 2012
@@ -24,21 +24,10 @@
 #if !defined ORG_APACHE_HADOOP_H
 #define ORG_APACHE_HADOOP_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <jni.h>
 
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
-
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
+#include "config.h"
 
 /* A helper macro to 'throw' a java exception. */ 
 #define THROW(env, exception_name, message) \

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh Thu Jun 28 06:59:38 2012
@@ -65,7 +65,7 @@ export HADOOP_CLIENT_OPTS="-Xmx128m $HAD
 export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Thu Jun 28 06:59:38 2012
@@ -599,10 +599,9 @@
   </description>
 </property>
 
-<!-- Rack Configuration -->
-
+<!-- Topology Configuration -->
 <property>
-	<name>net.topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
     invokes a script specified in net.topology.script.file.name to resolve
@@ -612,6 +611,13 @@
 </property>
 
 <property>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopology</value>
+  <description> The default implementation of NetworkTopology, which is the classic three-layer one.
+  </description>
+</property>
+
+<property>
   <name>net.topology.script.file.name</name>
   <value></value>
   <description> The script name that should be invoked to resolve DNS names to

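
Since net.topology.node.switch.mapping.impl makes the rack resolution pluggable, a minimal sketch of a custom DNSToSwitchMapping may be useful. It assumes the interface declares only resolve(List<String>) at this point in the codebase, and the third-octet rack rule is purely illustrative, not part of this commit:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.net.DNSToSwitchMapping;

    public class SubnetSwitchMapping implements DNSToSwitchMapping {
      @Override
      public List<String> resolve(List<String> names) {
        List<String> racks = new ArrayList<String>(names.size());
        for (String name : names) {
          String[] octets = name.split("\\.");
          // Assume the third octet of an IPv4 address identifies the rack;
          // this rule is an illustration only.
          racks.add(octets.length == 4 ? "/rack-" + octets[2] : "/default-rack");
        }
        return racks;
      }
    }
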
Propchange: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1346682-1354801

Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java Thu Jun 28 06:59:38 2012
@@ -663,6 +663,26 @@ public class TestConfiguration extends T
                  conf.getPattern("test.pattern3", defaultPattern).pattern());
   }
 
+  public void testPropertySource() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.foo", "bar");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    conf.set("fs.defaultFS", "value");
+    assertEquals(
+        "Resource string returned for a file-loaded property" +
+        " must be a proper absolute path",
+        fileResource,
+        new Path(conf.getPropertySource("test.foo")));
+    assertEquals("Resource string returned for a set() property must be null",
+        null,
+        conf.getPropertySource("fs.defaultFS"));
+    assertEquals("Resource string returned for an unset property must be null",
+        null, conf.getPropertySource("fs.defaultFoo"));
+  }
+
   public void testSocketAddress() throws IOException {
     Configuration conf = new Configuration();
     final String defaultAddr = "host:1";

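
The new testPropertySource exercises Configuration.getPropertySource(); a minimal usage sketch under the same behavior the test asserts (the property names and URI are illustrative, and at this revision a set() property has no source):

    import org.apache.hadoop.conf.Configuration;

    public class PropertySourceDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://example:8020");
        // A property set programmatically has no file resource behind it,
        // so its source is null, as the test above asserts.
        System.out.println(conf.getPropertySource("fs.defaultFS"));
        // A property loaded from a resource file reports that resource.
        System.out.println(
            conf.getPropertySource("hadoop.common.configuration.version"));
      }
    }
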
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java Thu Jun 28 06:59:38 2012
@@ -51,7 +51,19 @@ public class ViewFileSystemTestSetup {
     /**
      * create the test root on local_fs - the mount table will point here

      */
-    fsTarget.mkdirs(FileSystemTestHelper.getTestRootPath(fsTarget));
+    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetOfTests, true);
+    fsTarget.mkdirs(targetOfTests);
+
+    // Setup a link from viewfs to targetfs for the first component of
+    // path of testdir.
+    String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
+        .getPath();
+    int indexOf2ndSlash = testDir.indexOf('/', 1);
+    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
+    ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
+        new Path(testDirFirstComponent)).toUri());
 
     // viewFs://home => fsTarget://home
     String homeDirRoot = fsTarget.getHomeDirectory()

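
The setup change relies on ConfigUtil.addLink(conf, String, URI), the same call pattern shown in the hunk above; a short sketch with an illustrative mount point and target URI:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    public class MountDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Mount /data in the viewfs namespace onto a target file system URI.
        ConfigUtil.addLink(conf, "/data", new URI("hdfs://nn.example.com/data"));
      }
    }
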
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java Thu Jun 28 06:59:38 2012
@@ -21,9 +21,13 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -32,7 +36,8 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-
+  private static final String TEST_FILE_NAME = "test_file";
+  
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -110,4 +115,41 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
   
+  @Test
+  public void testWriteFully() throws IOException {
+    final int INPUT_BUFFER_LEN = 10000;
+    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+    byte[] input = new byte[INPUT_BUFFER_LEN];
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte)(i & 0xff);
+    }
+    byte[] output = new byte[input.length];
+    
+    try {
+      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+      FileChannel fc = raf.getChannel();
+      ByteBuffer buf = ByteBuffer.wrap(input);
+      IOUtils.writeFully(fc, buf);
+      raf.seek(0);
+      raf.read(output);
+      for (int i = 0; i < input.length; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      buf.rewind();
+      IOUtils.writeFully(fc, buf, HALFWAY);
+      for (int i = 0; i < HALFWAY; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      raf.seek(0);
+      raf.read(output);
+      for (int i = HALFWAY; i < input.length; i++) {
+        assertEquals(input[i - HALFWAY], output[i]);
+      }
+    } finally {
+      File f = new File(TEST_FILE_NAME);
+      if (f.exists()) {
+        f.delete();
+      }
+    }
+  }
 }

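
For context, IOUtils.writeFully(FileChannel, ByteBuffer) loops over the partial writes that a bare FileChannel.write may perform; a minimal usage sketch mirroring the test (the file name is illustrative):

    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import org.apache.hadoop.io.IOUtils;

    public class WriteFullyDemo {
      public static void main(String[] args) throws Exception {
        RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw");
        try {
          FileChannel fc = raf.getChannel();
          ByteBuffer buf = ByteBuffer.wrap("hello, world".getBytes("UTF-8"));
          // Unlike a single fc.write(buf), writeFully keeps writing until
          // the whole buffer has been drained.
          IOUtils.writeFully(fc, buf);
        } finally {
          raf.close();
        }
      }
    }
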
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java Thu Jun 28 06:59:38 2012
@@ -137,38 +137,38 @@ public class TestText extends TestCase {
     }
   }
   
-  public void doTestLimitedIO(String str, int strLen) throws IOException {
+  public void doTestLimitedIO(String str, int len) throws IOException {
     DataOutputBuffer out = new DataOutputBuffer();
     DataInputBuffer in = new DataInputBuffer();
 
     out.reset();
     try {
-      Text.writeString(out, str, strLen);
+      Text.writeString(out, str, len);
       fail("expected writeString to fail when told to write a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
-    Text.writeString(out, str, strLen + 1);
+    Text.writeString(out, str, len + 1);
 
     // test that it reads correctly
     in.reset(out.getData(), out.getLength());
-    in.mark(strLen);
+    in.mark(len);
     String after;
     try {
-      after = Text.readString(in, strLen);
+      after = Text.readString(in, len);
       fail("expected readString to fail when told to read a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
     in.reset();
-    after = Text.readString(in, strLen + 1);
+    after = Text.readString(in, len + 1);
     assertTrue(str.equals(after));
   }
   
   public void testLimitedIO() throws Exception {
-    doTestLimitedIO("abcd", 4);
-    doTestLimitedIO("", 0);
-    doTestLimitedIO("1", 1);
+    doTestLimitedIO("abcd", 3);
+    doTestLimitedIO("foo bar baz", 10);
+    doTestLimitedIO("1", 0);
   }
 
   public void testCompare() throws Exception {

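
Text.writeString and Text.readString take a length limit and throw IOException when the encoded string exceeds it, which is what the tightened test above now checks at the boundary; a sketch of the non-failing round trip (the limit of 16 is chosen generously for illustration):

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class LimitedTextIO {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        // Succeeds: the limit comfortably covers the encoded string.
        Text.writeString(out, "abcd", 16);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        // Reads it back, subject to the same kind of limit.
        String after = Text.readString(in, 16);
        System.out.println(after);
      }
    }
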
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java Thu Jun 28 06:59:38 2012
@@ -18,50 +18,55 @@
 
 package org.apache.hadoop.ipc;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.Closeable;
 import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslImpl;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslProtocol;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.MockitoUtil;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 import com.google.protobuf.DescriptorProtos;
 import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
 
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /** Unit tests for RPC. */
 @SuppressWarnings("deprecation")
 public class TestRPC {
@@ -250,7 +255,8 @@ public class TestRPC {
     @Override
     public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
         InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-        SocketFactory factory, int rpcTimeout) throws IOException {
+        SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+        ) throws IOException {
       T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
               new Class[] { protocol }, new StoppedInvocationHandler());
       return new ProtocolProxy<T>(protocol, proxy, false);

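
The getProxy signature change threads a connection-level RetryPolicy through proxy creation; a hedged sketch of building such a policy (the retry count and sleep are illustrative):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class PolicyDemo {
      public static void main(String[] args) {
        // Conservative default: try the connection once, then fail.
        RetryPolicy once = RetryPolicies.TRY_ONCE_THEN_FAIL;
        // Alternative: bounded retries with a fixed sleep between attempts.
        RetryPolicy bounded = RetryPolicies
            .retryUpToMaximumCountWithFixedSleep(5, 1, TimeUnit.SECONDS);
        System.out.println(once + " / " + bounded);
      }
    }
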
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java Thu Jun 28 06:59:38 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.security.token.delegation;
 
 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
@@ -387,4 +388,46 @@ public class TestDelegationToken {
     }
   }
 
+  private boolean testDelegationTokenIdentiferSerializationRoundTrip(Text owner,
+      Text renewer, Text realUser) throws IOException {
+    TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(
+        owner, renewer, realUser);
+    DataOutputBuffer out = new DataOutputBuffer();
+    dtid.writeImpl(out);
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(out.getData(), out.getLength());
+    try {
+      TestDelegationTokenIdentifier dtid2 =
+          new TestDelegationTokenIdentifier();
+      dtid2.readFields(in);
+      assertTrue(dtid.equals(dtid2));
+      return true;
+    } catch(IOException e){
+      return false;
+    }
+  }
+      
+  @Test
+  public void testSimpleDtidSerialization() throws IOException {
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text("realUser")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text(""), new Text("")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text("b"), new Text("")));
+  }
+  
+  @Test
+  public void testOverlongDtidSerialization() throws IOException {
+    byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
+    for (int i = 0; i < bigBuf.length; i++) {
+      bigBuf[i] = 0;
+    }
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(bigBuf), new Text("renewer"), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text(bigBuf), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text(bigBuf)));
+  }
 }

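
The new tests use the standard Writable round-trip idiom with DataOutputBuffer and DataInputBuffer; a minimal sketch of that idiom for any Writable (the Text value is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class RoundTrip {
      public static void main(String[] args) throws IOException {
        Text before = new Text("owner");
        DataOutputBuffer out = new DataOutputBuffer();
        before.write(out);                        // serialize
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength()); // rewind over the bytes
        Text after = new Text();
        after.readFields(in);                     // deserialize
        System.out.println(before.equals(after)); // true
      }
    }
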
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java Thu Jun 28 06:59:38 2012
@@ -15,15 +15,18 @@ package org.apache.hadoop.util;
 
 import com.google.common.base.Preconditions;
 
+import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.text.MessageFormat;
 import java.util.Enumeration;
+import java.util.jar.JarFile;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
 import java.util.zip.ZipEntry;
@@ -37,10 +40,37 @@ import java.util.zip.ZipOutputStream;
  */
 public class JarFinder {
 
-  private static void zipDir(File dir, String relativePath, ZipOutputStream zos)
+  private static void copyToZipStream(InputStream is, ZipEntry entry,
+                              ZipOutputStream zos) throws IOException {
+    zos.putNextEntry(entry);
+    byte[] arr = new byte[4096];
+    int read = is.read(arr);
+    while (read > -1) {
+      zos.write(arr, 0, read);
+      read = is.read(arr);
+    }
+    is.close();
+    zos.closeEntry();
+  }
+
+  public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
     throws IOException {
     Preconditions.checkNotNull(relativePath, "relativePath");
     Preconditions.checkNotNull(zos, "zos");
+
+    // by JAR spec, if there is a manifest, it must be the first entry in the
+    // ZIP.
+    File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
+    ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
+    if (!manifestFile.exists()) {
+      zos.putNextEntry(manifestEntry);
+      new Manifest().write(new BufferedOutputStream(zos));
+      zos.closeEntry();
+    } else {
+      InputStream is = new FileInputStream(manifestFile);
+      copyToZipStream(is, manifestEntry, zos);
+    }
+    zos.closeEntry();
     zipDir(dir, relativePath, zos, true);
     zos.close();
   }
@@ -62,17 +92,12 @@ public class JarFinder {
           zipDir(file, relativePath + f.getName() + "/", zos, false);
         }
         else {
-          ZipEntry anEntry = new ZipEntry(relativePath + f.getName());
-          zos.putNextEntry(anEntry);
-          InputStream is = new FileInputStream(f);
-          byte[] arr = new byte[4096];
-          int read = is.read(arr);
-          while (read > -1) {
-            zos.write(arr, 0, read);
-            read = is.read(arr);
+          String path = relativePath + f.getName();
+          if (!path.equals(JarFile.MANIFEST_NAME)) {
+            ZipEntry anEntry = new ZipEntry(path);
+            InputStream is = new FileInputStream(f);
+            copyToZipStream(is, anEntry, zos);
           }
-          is.close();
-          zos.closeEntry();
         }
       }
     }
@@ -88,9 +113,8 @@ public class JarFinder {
                                                    jarDir));
       }
     }
-    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile),
-                                              new Manifest());
-    zipDir(dir, "", zos);
+    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
+    jarDir(dir, "", zos);
   }
 
   /**
@@ -142,5 +166,4 @@ public class JarFinder {
     }
     return null;
   }
-
 }

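
jarDir is now public and writes the manifest as the first ZIP entry, as the JAR spec requires; a hedged usage sketch, assuming JarFinder (which lives in the test tree) is on the classpath and with illustrative paths:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.util.jar.JarOutputStream;
    import org.apache.hadoop.util.JarFinder;

    public class JarDirDemo {
      public static void main(String[] args) throws Exception {
        File classes = new File("target/classes");
        JarOutputStream jos =
            new JarOutputStream(new FileOutputStream("demo.jar"));
        // Writes META-INF/MANIFEST.MF first (generating an empty one if the
        // directory has none), then the rest of the tree; jarDir also
        // closes the stream when it finishes.
        JarFinder.jarDir(classes, "", jos);
      }
    }
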
Modified: hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java Thu Jun 28 06:59:38 2012
@@ -22,21 +22,105 @@ import org.apache.commons.logging.LogFac
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.Properties;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
 
 public class TestJarFinder {
 
   @Test
-  public void testAppend() throws Exception {
+  public void testJar() throws Exception {
 
     //picking a class that is for sure in a JAR in the classpath
     String jar = JarFinder.getJar(LogFactory.class);
     Assert.assertTrue(new File(jar).exists());
+  }
+
+  private static void delete(File file) throws IOException {
+    if (file.getAbsolutePath().length() < 5) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Path [{0}] is too short, not deleting",
+                             file.getAbsolutePath()));
+    }
+    if (file.exists()) {
+      if (file.isDirectory()) {
+        File[] children = file.listFiles();
+        if (children != null) {
+          for (File child : children) {
+            delete(child);
+          }
+        }
+      }
+      if (!file.delete()) {
+        throw new RuntimeException(
+          MessageFormat.format("Could not delete path [{0}]",
+                               file.getAbsolutePath()));
+      }
+    }
+  }
 
+  @Test
+  public void testExpandedClasspath() throws Exception {
     //picking a class that is for sure in a directory in the classpath
     //in this case the JAR is created on the fly
-    jar = JarFinder.getJar(TestJarFinder.class);
+    String jar = JarFinder.getJar(TestJarFinder.class);
     Assert.assertTrue(new File(jar).exists());
   }
 
+  @Test
+  public void testExistingManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testExistingManifest");
+    delete(dir);
+    dir.mkdirs();
+
+    File metaInfDir = new File(dir, "META-INF");
+    metaInfDir.mkdirs();
+    File manifestFile = new File(metaInfDir, "MANIFEST.MF");
+    Manifest manifest = new Manifest();
+    OutputStream os = new FileOutputStream(manifestFile);
+    manifest.write(os);
+    os.close();
+
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
+
+  @Test
+  public void testNoManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testNoManifest");
+    delete(dir);
+    dir.mkdirs();
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
 }


