hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1445918 [6/29] - in /hbase/branches/hbase-7290: ./ bin/ conf/ dev-support/ hbase-client/ hbase-common/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/ hbase-common/src/mai...
Date Wed, 13 Feb 2013 20:58:32 GMT
Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java Wed Feb 13 20:58:23 2013
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.IpcProtocol;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
@@ -33,7 +33,5 @@ import org.apache.hadoop.hbase.security.
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public interface ClientProtocol extends
-    ClientService.BlockingInterface, VersionedProtocol {
-  public static final long VERSION = 1L;
-}
+public interface ClientProtocol
+extends ClientService.BlockingInterface, IpcProtocol {}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java Wed Feb 13 20:58:23 2013
@@ -69,8 +69,8 @@ public class ClientScanner extends Abstr
     /**
      * Create a new ClientScanner for the specified table. An HConnection will be
      * retrieved using the passed Configuration.
-     * Note that the passed {@link Scan}'s start row maybe changed changed. 
-     * 
+     * Note that the passed {@link Scan}'s start row may be changed.
+     *
      * @param conf The {@link Configuration} to use.
      * @param scan {@link Scan} to use in this scanner
      * @param tableName The table that we wish to scan
@@ -80,11 +80,11 @@ public class ClientScanner extends Abstr
         final byte[] tableName) throws IOException {
       this(conf, scan, tableName, HConnectionManager.getConnection(conf));
     }
- 
+
     /**
      * Create a new ClientScanner for the specified table
-     * Note that the passed {@link Scan}'s start row maybe changed changed. 
-     * 
+     * Note that the passed {@link Scan}'s start row may be changed.
+     *
      * @param conf The {@link Configuration} to use.
      * @param scan {@link Scan} to use in this scanner
      * @param tableName The table that we wish to scan
@@ -250,7 +250,6 @@ public class ClientScanner extends Abstr
       if (this.scanMetrics == null) {
         return;
       }
-      final DataOutputBuffer d = new DataOutputBuffer();
       MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics);
       scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray());
     }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java Wed Feb 13 20:58:23 2013
@@ -74,12 +74,11 @@ public class Delete extends Mutation imp
    * @param row row key
    */
   public Delete(byte [] row) {
-    this(row, HConstants.LATEST_TIMESTAMP, null);
+    this(row, HConstants.LATEST_TIMESTAMP);
   }
 
   /**
-   * Create a Delete operation for the specified row and timestamp, using
-   * an optional row lock.<p>
+   * Create a Delete operation for the specified row and timestamp.<p>
    *
    * If no further operations are done, this will delete all columns in all
    * families of the specified row with a timestamp less than or equal to the
@@ -89,14 +88,10 @@ public class Delete extends Mutation imp
    * families or columns, you must specify each timestamp individually.
    * @param row row key
    * @param timestamp maximum version timestamp (only for delete row)
-   * @param rowLock previously acquired row lock, or null
    */
-  public Delete(byte [] row, long timestamp, RowLock rowLock) {
+  public Delete(byte [] row, long timestamp) {
     this.row = row;
     this.ts = timestamp;
-    if (rowLock != null) {
-    	this.lockId = rowLock.getLockId();
-    }
   }
 
   /**
@@ -105,7 +100,6 @@ public class Delete extends Mutation imp
   public Delete(final Delete d) {
     this.row = d.getRow();
     this.ts = d.getTimeStamp();
-    this.lockId = d.getLockId();
     this.familyMap.putAll(d.getFamilyMap());
     this.writeToWAL = d.writeToWAL;
   }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Get.java Wed Feb 13 20:58:23 2013
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HConstants;
@@ -66,7 +67,6 @@ public class Get extends OperationWithAt
   implements Row, Comparable<Row> {
 
   private byte [] row = null;
-  private long lockId = -1L;
   private int maxVersions = 1;
   private boolean cacheBlocks = true;
   private int storeLimit = -1;
@@ -84,22 +84,7 @@ public class Get extends OperationWithAt
    * @param row row key
    */
   public Get(byte [] row) {
-    this(row, null);
-  }
-
-  /**
-   * Create a Get operation for the specified row, using an existing row lock.
-   * <p>
-   * If no further operations are done, this will get the latest version of
-   * all columns in all families of the specified row.
-   * @param row row key
-   * @param rowLock previously acquired row lock, or null
-   */
-  public Get(byte [] row, RowLock rowLock) {
     this.row = row;
-    if(rowLock != null) {
-      this.lockId = rowLock.getLockId();
-    }
   }
 
   /**
@@ -261,22 +246,6 @@ public class Get extends OperationWithAt
   }
 
   /**
-   * Method for retrieving the get's RowLock
-   * @return RowLock
-   */
-  public RowLock getRowLock() {
-    return new RowLock(this.row, this.lockId);
-  }
-
-  /**
-   * Method for retrieving the get's lockId
-   * @return lockId
-   */
-  public long getLockId() {
-    return this.lockId;
-  }
-
-  /**
    * Method for retrieving the get's maximum number of version
    * @return the maximum number of version to fetch for this get
    */
@@ -418,8 +387,20 @@ public class Get extends OperationWithAt
   }
 
   //Row
+  @Override
   public int compareTo(Row other) {
     return Bytes.compareTo(this.getRow(), other.getRow());
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    Row other = (Row) obj;
+    return compareTo(other) == 0;
+  }
 }

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnection.java Wed Feb 13 20:58:23 2013
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.MasterNot
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
 /**
@@ -177,8 +176,20 @@ public interface HConnection extends Abo
    * @return list of region locations for all regions of table
    * @throws IOException
    */
-  public List<HRegionLocation> locateRegions(byte[] tableName)
+  public List<HRegionLocation> locateRegions(final byte[] tableName)
   throws IOException;
+  
+  /**
+   * Gets the locations of all regions in the specified table, <i>tableName</i>.
+   * @param tableName table to get regions of
+   * @param useCache Should we use the cache to retrieve the region information.
+   * @param offlined True if we are to include offlined regions, false and we'll leave out offlined
+   *          regions from returned list.
+   * @return list of region locations for all regions of table
+   * @throws IOException
+   */
+  public List<HRegionLocation> locateRegions(final byte[] tableName, final boolean useCache,
+      final boolean offlined) throws IOException;
 
   /**
    * Returns a {@link MasterAdminProtocol} to the active master
@@ -299,36 +310,6 @@ public interface HConnection extends Abo
       Object[] results,
       Batch.Callback<R> callback) throws IOException, InterruptedException;
 
-
-  /**
-   * Executes the given
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
-   * callable for each row in the given list and invokes
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
-   * for each result returned.
-   *
-   * @param protocol the protocol interface being called
-   * @param rows a list of row keys for which the callable should be invoked
-   * @param tableName table name for the coprocessor invoked
-   * @param pool ExecutorService used to submit the calls per row
-   * @param call instance on which to invoke
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-   * for each row
-   * @param callback instance on which to invoke
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
-   * for each result
-   * @param <T> the protocol interface type
-   * @param <R> the callable's return type
-   * @throws IOException
-   */
-  public <T extends CoprocessorProtocol,R> void processExecs(
-      final Class<T> protocol,
-      List<byte[]> rows,
-      final byte[] tableName,
-      ExecutorService pool,
-      final Batch.Call<T,R> call,
-      final Batch.Callback<R> callback) throws IOException, Throwable;
-
   /**
    * Enable or disable region cache prefetch for the table. It will be
    * applied for the given table's all HTable instances within this

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java Wed Feb 13 20:58:23 2013
@@ -35,9 +35,9 @@ import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArraySet;
@@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.MasterAdm
 import org.apache.hadoop.hbase.MasterMonitorProtocol;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MasterProtocol;
+import org.apache.hadoop.hbase.IpcProtocol;
 import org.apache.hadoop.hbase.RegionMovedException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
@@ -73,10 +74,9 @@ import org.apache.hadoop.hbase.ZooKeeper
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
-import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
-import org.apache.hadoop.hbase.ipc.HBaseRPC;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
+import org.apache.hadoop.hbase.ipc.HBaseClientRPC;
+import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine;
+import org.apache.hadoop.hbase.ipc.RpcClientEngine;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.SoftValueSortedMap;
 import org.apache.hadoop.hbase.util.Triple;
@@ -119,7 +120,7 @@ import com.google.protobuf.ServiceExcept
  * <p>But sharing connections
  * makes clean up of {@link HConnection} instances a little awkward.  Currently,
  * clients cleanup by calling
- * {@link #deleteConnection(Configuration, boolean)}.  This will shutdown the
+ * {@link #deleteConnection(Configuration)}.  This will shutdown the
  * zookeeper connection the HConnection was using and clean up all
  * HConnection resources as well as stopping proxies to servers out on the
  * cluster. Not running the cleanup will not end the world; it'll
@@ -140,7 +141,7 @@ import com.google.protobuf.ServiceExcept
  * }
  * </pre>
  * <p>Cleanup used to be done inside in a shutdown hook.  On startup we'd
- * register a shutdown hook that called {@link #deleteAllConnections(boolean)}
+ * register a shutdown hook that called {@link #deleteAllConnections()}
  * on its way out but the order in which shutdown hooks run is not defined so
  * were problematic for clients of HConnection that wanted to register their
  * own shutdown hooks so we removed ours though this shifts the onus for
@@ -213,6 +214,10 @@ public class HConnectionManager {
       if (connection == null) {
         connection = new HConnectionImplementation(conf, true);
         HBASE_INSTANCES.put(connectionKey, connection);
+      } else if (connection.isClosed()) {
+        HConnectionManager.deleteConnection(connectionKey, true);
+        connection = new HConnectionImplementation(conf, true);
+        HBASE_INSTANCES.put(connectionKey, connection);
       }
       connection.incCount();
       return connection;
@@ -242,14 +247,9 @@ public class HConnectionManager {
    * @param conf
    *          configuration whose identity is used to find {@link HConnection}
    *          instance.
-   * @param stopProxy
-   *          Shuts down all the proxy's put up to cluster members including to
-   *          cluster HMaster. Calls
-   *          {@link HBaseRPC#stopProxy(org.apache.hadoop.hbase.ipc.VersionedProtocol)}
-   *          .
    */
-  public static void deleteConnection(Configuration conf, boolean stopProxy) {
-    deleteConnection(new HConnectionKey(conf), stopProxy, false);
+  public static void deleteConnection(Configuration conf) {
+    deleteConnection(new HConnectionKey(conf), false);
   }
 
   /**
@@ -260,40 +260,37 @@ public class HConnectionManager {
    * @param connection
    */
   public static void deleteStaleConnection(HConnection connection) {
-    deleteConnection(connection, true, true);
+    deleteConnection(connection, true);
   }
 
   /**
    * Delete information for all connections.
-   * @param stopProxy stop the proxy as well
    * @throws IOException
    */
-  public static void deleteAllConnections(boolean stopProxy) {
+  public static void deleteAllConnections() {
     synchronized (HBASE_INSTANCES) {
       Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
       connectionKeys.addAll(HBASE_INSTANCES.keySet());
       for (HConnectionKey connectionKey : connectionKeys) {
-        deleteConnection(connectionKey, stopProxy, false);
+        deleteConnection(connectionKey, false);
       }
       HBASE_INSTANCES.clear();
     }
   }
 
-  private static void deleteConnection(HConnection connection, boolean stopProxy,
-      boolean staleConnection) {
+  private static void deleteConnection(HConnection connection, boolean staleConnection) {
     synchronized (HBASE_INSTANCES) {
       for (Entry<HConnectionKey, HConnectionImplementation> connectionEntry : HBASE_INSTANCES
           .entrySet()) {
         if (connectionEntry.getValue() == connection) {
-          deleteConnection(connectionEntry.getKey(), stopProxy, staleConnection);
+          deleteConnection(connectionEntry.getKey(), staleConnection);
           break;
         }
       }
     }
   }
 
-  private static void deleteConnection(HConnectionKey connectionKey,
-      boolean stopProxy, boolean staleConnection) {
+  private static void deleteConnection(HConnectionKey connectionKey, boolean staleConnection) {
     synchronized (HBASE_INSTANCES) {
       HConnectionImplementation connection = HBASE_INSTANCES
           .get(connectionKey);
@@ -301,11 +298,9 @@ public class HConnectionManager {
         connection.decCount();
         if (connection.isZeroReference() || staleConnection) {
           HBASE_INSTANCES.remove(connectionKey);
-          connection.close(stopProxy);
-        } else if (stopProxy) {
-          connection.stopProxyOnClose(stopProxy);
+          connection.internalClose();
         }
-      }else {
+      } else {
         LOG.error("Connection not found in the list, can't delete it "+
           "(connection key="+connectionKey+"). May be the key was modified?");
       }
@@ -413,7 +408,7 @@ public class HConnectionManager {
    *
    */
   public static class HConnectionKey {
-    public static String[] CONNECTION_PROPERTIES = new String[] {
+    final static String[] CONNECTION_PROPERTIES = new String[] {
         HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT,
         HConstants.ZOOKEEPER_CLIENT_PORT,
         HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME,
@@ -467,6 +462,9 @@ public class HConnectionManager {
       return result;
     }
 
+
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="ES_COMPARING_STRINGS_WITH_EQ",
+        justification="Optimization")
     @Override
     public boolean equals(Object obj) {
       if (this == obj)
@@ -492,6 +490,7 @@ public class HConnectionManager {
         for (String property : CONNECTION_PROPERTIES) {
           String thisValue = this.properties.get(property);
           String thatValue = that.properties.get(property);
+          //noinspection StringEquality
           if (thisValue == thatValue) {
             continue;
           }
@@ -512,7 +511,7 @@ public class HConnectionManager {
     }
   }
 
-  /* Encapsulates connection to zookeeper and regionservers.*/
+  /** Encapsulates connection to zookeeper and regionservers.*/
   static class HConnectionImplementation implements HConnection, Closeable {
     static final Log LOG = LogFactory.getLog(HConnectionImplementation.class);
     private final Class<? extends AdminProtocol> adminClass;
@@ -543,9 +542,12 @@ public class HConnectionManager {
 
     private final Configuration conf;
 
+    // client RPC
+    private RpcClientEngine rpcEngine;
+
     // Known region ServerName.toString() -> RegionClient/Admin
-    private final ConcurrentHashMap<String, Map<String, VersionedProtocol>> servers =
-      new ConcurrentHashMap<String, Map<String, VersionedProtocol>>();
+    private final ConcurrentHashMap<String, Map<String, IpcProtocol>> servers =
+      new ConcurrentHashMap<String, Map<String, IpcProtocol>>();
     private final ConcurrentHashMap<String, String> connectionLock =
       new ConcurrentHashMap<String, String>();
 
@@ -569,7 +571,6 @@ public class HConnectionManager {
     private final Set<Integer> regionCachePrefetchDisabledTables =
       new CopyOnWriteArraySet<Integer>();
 
-    private boolean stopProxy;
     private int refCount;
 
     // indicates whether this connection's life cycle is managed (by us)
@@ -583,6 +584,9 @@ public class HConnectionManager {
     throws ZooKeeperConnectionException {
       this.conf = conf;
       this.managed = managed;
+      // ProtobufRpcClientEngine is the main RpcClientEngine implementation,
+      // but we maintain access through an interface to allow overriding for tests
+      this.rpcEngine = new ProtobufRpcClientEngine(conf);
       String adminClassName = conf.get(REGION_PROTOCOL_CLASS,
         DEFAULT_ADMIN_PROTOCOL_CLASS);
       this.closed = false;
@@ -675,12 +679,10 @@ public class HConnectionManager {
       public int userCount;
       public long keepAliveUntil = Long.MAX_VALUE;
       public final Class<? extends MasterProtocol> protocolClass;
-      public long version;
 
       public MasterProtocolState (
-          final Class<? extends MasterProtocol> protocolClass, long version) {
+          final Class<? extends MasterProtocol> protocolClass) {
         this.protocolClass = protocolClass;
-        this.version = version;
       }
     }
 
@@ -712,16 +714,14 @@ public class HConnectionManager {
 
         InetSocketAddress isa =
           new InetSocketAddress(sn.getHostname(), sn.getPort());
-        MasterProtocol tryMaster = (MasterProtocol) HBaseRPC.getProxy(
-          masterProtocolState.protocolClass,
-          masterProtocolState.version,
-          isa, this.conf,this.rpcTimeout);
+        MasterProtocol tryMaster = rpcEngine.getProxy(
+            masterProtocolState.protocolClass,
+            isa, this.conf, this.rpcTimeout);
 
         if (tryMaster.isMasterRunning(
             null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning()) {
           return tryMaster;
         } else {
-          HBaseRPC.stopProxy(tryMaster);
           String msg = "Can create a proxy to master, but it is not running";
           LOG.info(msg);
           throw new MasterNotRunningException(msg);
@@ -734,11 +734,13 @@ public class HConnectionManager {
     /**
      * Create a master, retries if necessary.
      */
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="SWL_SLEEP_WITH_LOCK_HELD")
     private MasterProtocol createMasterWithRetries(
       MasterProtocolState masterProtocolState) throws MasterNotRunningException {
 
       // The lock must be at the beginning to prevent multiple master creation
       //  (and leaks) in a multithread context
+
       synchronized (this.masterAndZKLock) {
         Exception exceptionCaught = null;
         MasterProtocol master = null;
@@ -760,7 +762,8 @@ public class HConnectionManager {
           if (exceptionCaught != null)
             // It failed. If it's not the last try, we're going to wait a little
           if (tries < numRetries) {
-            long pauseTime = ConnectionUtils.getPauseTime(this.pause, tries);
+            // tries at this point is 1 or more; decrement to start from 0.
+            long pauseTime = ConnectionUtils.getPauseTime(this.pause, tries - 1);
             LOG.info("getMaster attempt " + tries + " of " + numRetries +
               " failed; retrying after sleep of " +pauseTime, exceptionCaught);
 
@@ -890,17 +893,27 @@ public class HConnectionManager {
     }
 
     @Override
-    public HRegionLocation locateRegion(final byte [] regionName)
-    throws IOException {
-      // TODO implement.  use old stuff or new stuff?
-      return null;
+    public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
+      return locateRegion(HRegionInfo.getTableName(regionName),
+          HRegionInfo.getStartKey(regionName), false, true);
     }
 
     @Override
-    public List<HRegionLocation> locateRegions(final byte [] tableName)
+    public List<HRegionLocation> locateRegions(final byte[] tableName)
     throws IOException {
-      // TODO implement.  use old stuff or new stuff?
-      return null;
+      return locateRegions (tableName, false, true);
+    }
+    
+    @Override
+    public List<HRegionLocation> locateRegions(final byte[] tableName, final boolean useCache,
+        final boolean offlined) throws IOException {
+      NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, tableName,
+        offlined);
+      final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
+      for (HRegionInfo regionInfo : regions.keySet()) {
+        locations.add(locateRegion(tableName, regionInfo.getStartKey(), useCache, true));
+      }
+      return locations;
     }
 
     @Override
@@ -945,8 +958,8 @@ public class HConnectionManager {
           LOG.debug("Looked up root region location, connection=" + this +
             "; serverName=" + ((servername == null) ? "null" : servername));
           if (servername == null) return null;
-          return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO,
-            servername.getHostname(), servername.getPort());
+          return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, servername.getHostname(),
+              servername.getPort(), 0);
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
           return null;
@@ -995,10 +1008,9 @@ public class HConnectionManager {
             }
             // instantiate the location
             HRegionLocation loc = new HRegionLocation(regionInfo, serverName.getHostname(),
-                serverName.getPort());
+                serverName.getPort(), HRegionInfo.getSeqNumDuringOpen(result));
             // cache this meta entry
-            cacheLocation(tableName, loc);
-
+            cacheLocation(tableName, null, loc);
             return true;
           } catch (RuntimeException e) {
             throw new IOException(e);
@@ -1074,7 +1086,7 @@ public class HConnectionManager {
                 return location;
               }
             } else {
-              deleteCachedLocation(tableName, row);
+              forceDeleteCachedLocation(tableName, row);
             }
 
             // Query the root or meta region for the location of the meta region
@@ -1120,9 +1132,9 @@ public class HConnectionManager {
           }
 
           // Instantiate the location
-          location =
-              new HRegionLocation(regionInfo, serverName.getHostname(), serverName.getPort());
-          cacheLocation(tableName, location);
+          location = new HRegionLocation(regionInfo, serverName.getHostname(),
+                  serverName.getPort(), HRegionInfo.getSeqNumDuringOpen(regionInfoRow));
+          cacheLocation(tableName, null, location);
           return location;
         } catch (TableNotFoundException e) {
           // if we got this error, probably means the table just plain doesn't
@@ -1210,29 +1222,30 @@ public class HConnectionManager {
     }
 
     /**
-     * Delete a cached location
+     * Delete a cached location, no matter what it is. Called when we were told to not use cache.
      * @param tableName tableName
      * @param row
      */
-    void deleteCachedLocation(final byte [] tableName, final byte [] row) {
+    void forceDeleteCachedLocation(final byte [] tableName, final byte [] row) {
+      HRegionLocation rl = null;
       synchronized (this.cachedRegionLocations) {
         Map<byte[], HRegionLocation> tableLocations =
             getTableLocations(tableName);
         // start to examine the cache. we can only do cache actions
         // if there's something in the cache for this table.
         if (!tableLocations.isEmpty()) {
-          HRegionLocation rl = getCachedLocation(tableName, row);
+          rl = getCachedLocation(tableName, row);
           if (rl != null) {
             tableLocations.remove(rl.getRegionInfo().getStartKey());
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Removed " +
-                rl.getRegionInfo().getRegionNameAsString() +
-                " for tableName=" + Bytes.toString(tableName) +
-                " from cache " + "because of " + Bytes.toStringBinary(row));
-            }
           }
         }
       }
+      if ((rl != null) && LOG.isDebugEnabled()) {
+        LOG.debug("Removed " + rl.getHostname() + ":" + rl.getPort()
+          + " as a location of " + rl.getRegionInfo().getRegionNameAsString() +
+          " for tableName=" + Bytes.toString(tableName) +
+          " from cache to make sure we don't use cache for " + Bytes.toStringBinary(row));
+      }
     }
 
     @Override
@@ -1304,23 +1317,52 @@ public class HConnectionManager {
       }
     }
 
-    /*
+    /**
      * Put a newly discovered HRegionLocation into the cache.
+     * @param tableName The table name.
+     * @param source the source of the new location, if it's not coming from meta
+     * @param location the new location
      */
-    private void cacheLocation(final byte [] tableName,
+    private void cacheLocation(final byte [] tableName, final HRegionLocation source,
         final HRegionLocation location) {
+      boolean isFromMeta = (source == null);
       byte [] startKey = location.getRegionInfo().getStartKey();
       Map<byte [], HRegionLocation> tableLocations =
         getTableLocations(tableName);
-      boolean hasNewCache = false;
+      boolean isNewCacheEntry = false;
+      boolean isStaleUpdate = false;
+      HRegionLocation oldLocation = null;
       synchronized (this.cachedRegionLocations) {
         cachedServers.add(location.getHostnamePort());
-        hasNewCache = (tableLocations.put(startKey, location) == null);
+        oldLocation = tableLocations.get(startKey);
+        isNewCacheEntry = (oldLocation == null);
+        // If the server in cache sends us a redirect, assume it's always valid.
+        if (!isNewCacheEntry && !oldLocation.equals(source)) {
+          long newLocationSeqNum = location.getSeqNum();
+          // Meta record is stale - some (probably the same) server has closed the region
+          // with later seqNum and told us about the new location.
+          boolean isStaleMetaRecord = isFromMeta && (oldLocation.getSeqNum() > newLocationSeqNum);
+          // Same as above for redirect. However, in this case, if the number is equal to previous
+          // record, the most common case is that first the region was closed with seqNum, and then
+          // opened with the same seqNum; hence we will ignore the redirect.
+          // There are so many corner cases with various combinations of opens and closes that
+          // an additional counter on top of seqNum would be necessary to handle them all.
+          boolean isStaleRedirect = !isFromMeta && (oldLocation.getSeqNum() >= newLocationSeqNum);
+          isStaleUpdate = (isStaleMetaRecord || isStaleRedirect);
+        }
+        if (!isStaleUpdate) {
+          tableLocations.put(startKey, location);
+        }
       }
-      if (hasNewCache) {
+      if (isNewCacheEntry) {
         LOG.debug("Cached location for " +
             location.getRegionInfo().getRegionNameAsString() +
             " is " + location.getHostnamePort());
+      } else if (isStaleUpdate && !location.equals(oldLocation)) {
+        LOG.debug("Ignoring stale location update for "
+          + location.getRegionInfo().getRegionNameAsString() + ": "
+          + location.getHostnamePort() + " at " + location.getSeqNum() + "; local "
+          + oldLocation.getHostnamePort() + " at " + oldLocation.getSeqNum());
       }
     }
 
@@ -1331,17 +1373,16 @@ public class HConnectionManager {
     }
 
     @Override
-    public ClientProtocol getClient(
-        final String hostname, final int port) throws IOException {
-      return (ClientProtocol)getProtocol(hostname, port,
-        clientClass, ClientProtocol.VERSION);
+    public ClientProtocol getClient(final String hostname, final int port)
+    throws IOException {
+      return (ClientProtocol)getProtocol(hostname, port, clientClass);
     }
 
     @Override
-    public AdminProtocol getAdmin(final String hostname,
-        final int port, final boolean master) throws IOException {
-      return (AdminProtocol)getProtocol(hostname, port,
-        adminClass, AdminProtocol.VERSION);
+    public AdminProtocol getAdmin(final String hostname, final int port,
+        final boolean master)
+    throws IOException {
+      return (AdminProtocol)getProtocol(hostname, port, adminClass);
     }
 
     /**
@@ -1350,26 +1391,25 @@ public class HConnectionManager {
      * @param hostname
      * @param port
      * @param protocolClass
-     * @param version
      * @return Proxy.
      * @throws IOException
      */
-    VersionedProtocol getProtocol(final String hostname,
-        final int port, final Class <? extends VersionedProtocol> protocolClass,
-        final long version) throws IOException {
+    IpcProtocol getProtocol(final String hostname,
+        final int port, final Class <? extends IpcProtocol> protocolClass)
+    throws IOException {
       String rsName = Addressing.createHostAndPortStr(hostname, port);
       // See if we already have a connection (common case)
-      Map<String, VersionedProtocol> protocols = this.servers.get(rsName);
+      Map<String, IpcProtocol> protocols = this.servers.get(rsName);
       if (protocols == null) {
-        protocols = new HashMap<String, VersionedProtocol>();
-        Map<String, VersionedProtocol> existingProtocols =
+        protocols = new HashMap<String, IpcProtocol>();
+        Map<String, IpcProtocol> existingProtocols =
           this.servers.putIfAbsent(rsName, protocols);
         if (existingProtocols != null) {
           protocols = existingProtocols;
         }
       }
       String protocol = protocolClass.getName();
-      VersionedProtocol server = protocols.get(protocol);
+      IpcProtocol server = protocols.get(protocol);
       if (server == null) {
         // create a unique lock for this RS + protocol (if necessary)
         String lockKey = protocol + "@" + rsName;
@@ -1383,8 +1423,7 @@ public class HConnectionManager {
               // Only create isa when we need to.
               InetSocketAddress address = new InetSocketAddress(hostname, port);
               // definitely a cache miss. establish an RPC for this RS
-              server = HBaseRPC.waitForProxy(
-                  protocolClass, version, address, this.conf,
+              server = HBaseClientRPC.waitForProxy(rpcEngine, protocolClass, address, this.conf,
                   this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout);
               protocols.put(protocol, server);
             } catch (RemoteException e) {
@@ -1462,7 +1501,7 @@ public class HConnectionManager {
     /**
      * Creates a Chore thread to check the connections to master & zookeeper
      *  and close them when they reach their closing time (
-     *  {@link #MasterProtocolState.keepAliveUntil} and
+     *  {@link MasterProtocolState#keepAliveUntil} and
      *  {@link #keepZooKeeperWatcherAliveUntil}). Keep alive time is
      *  managed by the release functions and the variable {@link #keepAlive}
      */
@@ -1581,9 +1620,9 @@ public class HConnectionManager {
     }
 
     MasterProtocolState masterAdminProtocol =
-      new MasterProtocolState(MasterAdminProtocol.class, MasterAdminProtocol.VERSION);
+      new MasterProtocolState(MasterAdminProtocol.class);
     MasterProtocolState masterMonitorProtocol =
-      new MasterProtocolState(MasterMonitorProtocol.class, MasterMonitorProtocol.VERSION);
+      new MasterProtocolState(MasterMonitorProtocol.class);
 
     /**
      * This function allows HBaseAdmin and potentially others
@@ -1597,9 +1636,6 @@ public class HConnectionManager {
         throws MasterNotRunningException {
       synchronized (masterAndZKLock) {
         if (!isKeepAliveMasterConnectedAndRunning(protocolState)) {
-          if (protocolState.protocol != null) {
-            HBaseRPC.stopProxy(protocolState.protocol);
-          }
           protocolState.protocol = null;
           protocolState.protocol = createMasterWithRetries(protocolState);
         }
@@ -1617,7 +1653,7 @@ public class HConnectionManager {
     @Override
     public MasterAdminProtocol getMasterAdmin() throws MasterNotRunningException {
       return getKeepAliveMasterAdmin();
-    };
+    }
 
     @Override
     public MasterMonitorProtocol getMasterMonitor() throws MasterNotRunningException {
@@ -1674,7 +1710,6 @@ public class HConnectionManager {
     private void closeMasterProtocol(MasterProtocolState protocolState) {
       if (protocolState.protocol != null){
         LOG.info("Closing master protocol: " + protocolState.protocolClass.getName());
-        HBaseRPC.stopProxy(protocolState.protocol);
         protocolState.protocol = null;
       }
       protocolState.userCount = 0;
@@ -1730,60 +1765,72 @@ public class HConnectionManager {
       };
    }
 
-
-    void updateCachedLocation(HRegionLocation hrl, String hostname, int port) {
-      HRegionLocation newHrl = new HRegionLocation(hrl.getRegionInfo(), hostname, port);
+   void updateCachedLocation(HRegionInfo hri, HRegionLocation source,
+       String hostname, int port, long seqNum) {
+      HRegionLocation newHrl = new HRegionLocation(hri, hostname, port, seqNum);
       synchronized (this.cachedRegionLocations) {
-        cacheLocation(hrl.getRegionInfo().getTableName(), newHrl);
+        cacheLocation(hri.getTableName(), source, newHrl);
       }
     }
 
-    void deleteCachedLocation(HRegionLocation rl) {
+   /**
+    * Deletes the cached location of the region if necessary, based on some error from source.
+    * @param hri The region in question.
+    * @param source The source of the error that prompts us to invalidate cache.
+    */
+    void deleteCachedLocation(HRegionInfo hri, HRegionLocation source) {
+      boolean isStaleDelete = false;
+      HRegionLocation oldLocation = null;
       synchronized (this.cachedRegionLocations) {
         Map<byte[], HRegionLocation> tableLocations =
-          getTableLocations(rl.getRegionInfo().getTableName());
-        tableLocations.remove(rl.getRegionInfo().getStartKey());
+          getTableLocations(hri.getTableName());
+        oldLocation = tableLocations.get(hri.getStartKey());
+        if (oldLocation != null) {
+           // Do not delete the cache entry if it's not for the same server that gave us the error.
+          isStaleDelete = (source != null) && !oldLocation.equals(source);
+          if (!isStaleDelete) {
+            tableLocations.remove(hri.getStartKey());
+          }
+        }
+      }
+      if (isStaleDelete) {
+        LOG.debug("Received an error from " + source.getHostnamePort() + " for region "
+          + hri.getRegionNameAsString() + "; not removing "
+          + oldLocation.getHostnamePort() + " from cache.");
       }
-    }
-
-    private void updateCachedLocations(byte[] tableName, Row row, Object t) {
-      updateCachedLocations(null, tableName, row, t);
     }
 
     /**
-     * Update the location with the new value (if the exception is a RegionMovedException) or delete
-     *  it from the cache.
-     * @param hrl - can be null. If it's the case, tableName and row should not be null
-     * @param tableName - can be null if hrl is not null.
-     * @param row  - can be null if hrl is not null.
-     * @param exception - An object (to simplify user code) on which we will try to find a nested
+     * Update the location with the new value (if the exception is a RegionMovedException)
+     * or delete it from the cache.
+     * @param exception an object (to simplify user code) on which we will try to find a nested
      *                  or wrapped or both RegionMovedException
+     * @param source server that is the source of the location update.
      */
-    private void updateCachedLocations(final HRegionLocation hrl, final byte[] tableName,
-      Row row, final Object exception) {
-
-      if ((row == null || tableName == null) && hrl == null){
-        LOG.warn ("Coding error, see method javadoc. row="+row+", tableName="+
-          Bytes.toString(tableName)+", hrl="+hrl);
+    private void updateCachedLocations(final byte[] tableName, Row row,
+      final Object exception, final HRegionLocation source) {
+      if (row == null || tableName == null) {
+        LOG.warn("Coding error, see method javadoc. row=" + (row == null ? "null" : row) +
+            ", tableName=" + (tableName == null ? "null" : Bytes.toString(tableName)));
         return;
       }
 
       // Is it something we have already updated?
-      final HRegionLocation myLoc = (hrl != null ?
-        hrl : getCachedLocation(tableName, row.getRow()));
-      if (myLoc == null) {
+      final HRegionLocation oldLocation = getCachedLocation(tableName, row.getRow());
+      if (oldLocation == null) {
         // There is no such location in the cache => it's been removed already => nothing to do
         return;
       }
 
+      HRegionInfo regionInfo = oldLocation.getRegionInfo();
       final RegionMovedException rme = RegionMovedException.find(exception);
       if (rme != null) {
-        LOG.info("Region " + myLoc.getRegionInfo().getRegionNameAsString() + " moved from " +
-          myLoc.getHostnamePort() + ", updating client location cache." +
-          " New server: " + rme.getHostname() + ":" + rme.getPort());
-        updateCachedLocation(myLoc, rme.getHostname(), rme.getPort());
+        LOG.info("Region " + regionInfo.getRegionNameAsString() + " moved to " +
+          rme.getHostname() + ":" + rme.getPort() + " according to " + source.getHostnamePort());
+        updateCachedLocation(
+            regionInfo, source, rme.getHostname(), rme.getPort(), rme.getLocationSeqNum());
       } else {
-        deleteCachedLocation(myLoc);
+        deleteCachedLocation(regionInfo, source);
       }
     }
 
@@ -1841,12 +1888,6 @@ public class HConnectionManager {
       private final Object[] results;
       private final Batch.Callback<R> callback;
 
-      // Error management: these lists are filled by the errors on the final try. Indexes
-      //  are consistent, i.e. exceptions[i] matches failedActions[i] and failedAddresses[i]
-      private final List<Throwable> exceptions;
-      private final List<Row> failedActions;
-      private final List<String> failedAddresses;
-
       // Used during the batch process
       private final List<Action<R>> toReplay;
       private final LinkedList<Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>>
@@ -1868,9 +1909,6 @@ public class HConnectionManager {
         this.toReplay = new ArrayList<Action<R>>();
         this.inProgress =
           new LinkedList<Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>>();
-        this.exceptions = new ArrayList<Throwable>();
-        this.failedActions = new ArrayList<Row>();
-        this.failedAddresses = new ArrayList<String>();
         this.curNumRetries = 0;
       }
 
@@ -1909,6 +1947,14 @@ public class HConnectionManager {
         for (Entry<HRegionLocation, MultiAction<R>> e : actionsByServer.entrySet()) {
           Callable<MultiResponse> callable =
             createDelayedCallable(sleepTime, e.getKey(), e.getValue());
+          if (LOG.isTraceEnabled() && (sleepTime > 0)) {
+            StringBuilder sb = new StringBuilder();
+            for (Action<R> action : e.getValue().allActions()) {
+              sb.append(Bytes.toStringBinary(action.getAction().getRow()) + ";");
+            }
+            LOG.trace("Sending requests to [" + e.getKey().getHostnamePort()
+              + "] with delay of [" + sleepTime + "] for rows [" + sb.toString() + "]");
+          }
           Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>> p =
             new Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>(
               e.getValue(), e.getKey(), this.pool.submit(callable));
@@ -1916,22 +1962,15 @@ public class HConnectionManager {
         }
       }
 
-
-      private void addToErrorsLists(Exception ex, Row row, Triple<MultiAction<R>,
-          HRegionLocation, Future<MultiResponse>> obj) {
-        this.exceptions.add(ex);
-        this.failedActions.add(row);
-        this.failedAddresses.add(obj.getSecond().getHostnamePort());
-      }
-
      /**
       * Resubmit the actions which have failed, after a sleep time.
       * @throws IOException
       */
       private void doRetry() throws IOException{
-          final long sleepTime = ConnectionUtils.getPauseTime(hci.pause, this.curNumRetries);
-          submit(this.toReplay, sleepTime);
-          this.toReplay.clear();
+        // curNumRetries at this point is 1 or more; decrement to start from 0.
+        final long sleepTime = ConnectionUtils.getPauseTime(hci.pause, this.curNumRetries - 1);
+        submit(this.toReplay, sleepTime);
+        this.toReplay.clear();
       }
 
       /**
@@ -1950,6 +1989,13 @@ public class HConnectionManager {
           return;
         }
 
+        boolean isTraceEnabled = LOG.isTraceEnabled();
+        BatchErrors errors = new BatchErrors();
+        BatchErrors retriedErrors = null;
+        if (isTraceEnabled) {
+          retriedErrors = new BatchErrors();
+        }
+
         // We keep the number of retry per action.
         int[] nbRetries = new int[this.results.length];
 
@@ -1975,7 +2021,6 @@ public class HConnectionManager {
 
         // Analyze and resubmit until all actions are done successfully or failed after numRetries
         while (!this.inProgress.isEmpty()) {
-
           // We need the original multi action to find out what actions to replay if
           //  we have a 'total' failure of the Future<MultiResponse>
           // We need the HRegionLocation as we give it back if we go out of retries
@@ -1996,10 +2041,13 @@ public class HConnectionManager {
             for (List<Action<R>> actions : currentTask.getFirst().actions.values()) {
               for (Action<R> action : actions) {
                 Row row = action.getAction();
-                hci.updateCachedLocations(this.tableName, row, exception);
+                hci.updateCachedLocations(tableName, row, exception, currentTask.getSecond());
                 if (noRetry) {
-                  addToErrorsLists(exception, row, currentTask);
+                  errors.add(exception, row, currentTask);
                 } else {
+                  if (isTraceEnabled) {
+                    retriedErrors.add(exception, row, currentTask);
+                  }
                   lastRetry = addToReplay(nbRetries, action);
                 }
               }
@@ -2019,10 +2067,13 @@ public class HConnectionManager {
                 // Failure: retry if it's make sense else update the errors lists
                 if (result == null || result instanceof Throwable) {
                   Row row = correspondingAction.getAction();
-                  hci.updateCachedLocations(this.tableName, row, result);
+                  hci.updateCachedLocations(this.tableName, row, result, currentTask.getSecond());
                   if (result instanceof DoNotRetryIOException || noRetry) {
-                    addToErrorsLists((Exception)result, row, currentTask);
+                    errors.add((Exception)result, row, currentTask);
                   } else {
+                    if (isTraceEnabled) {
+                      retriedErrors.add((Exception)result, row, currentTask);
+                    }
                     lastRetry = addToReplay(nbRetries, correspondingAction);
                   }
                 } else // success
@@ -2037,20 +2088,60 @@ public class HConnectionManager {
 
           // Retry all actions in toReplay then clear it.
           if (!noRetry && !toReplay.isEmpty()) {
+            if (isTraceEnabled) {
+              LOG.trace("Retrying due to errors: " + retriedErrors.getDescriptionAndClear());
+            }
             doRetry();
             if (lastRetry) {
+              if (isTraceEnabled) {
+                LOG.trace("No more retries");
+              }
               noRetry = true;
             }
           }
         }
 
-        if (!exceptions.isEmpty()) {
-          throw new RetriesExhaustedWithDetailsException(this.exceptions,
-            this.failedActions,
-            this.failedAddresses);
+        errors.rethrowIfAny();
+      }
+
+
+      private class BatchErrors {
+        private List<Throwable> exceptions = new ArrayList<Throwable>();
+        private List<Row> actions = new ArrayList<Row>();
+        private List<String> addresses = new ArrayList<String>();
+
+        public void add(Exception ex, Row row,
+          Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>> obj) {
+          exceptions.add(ex);
+          actions.add(row);
+          addresses.add(obj.getSecond().getHostnamePort());
+        }
+
+        public void rethrowIfAny() throws RetriesExhaustedWithDetailsException {
+          if (!exceptions.isEmpty()) {
+            throw makeException();
+          }
+        }
+
+        public String getDescriptionAndClear()
+        {
+          if (exceptions.isEmpty()) {
+            return "";
+          }
+          String result = makeException().getMessage();
+          exceptions.clear();
+          actions.clear();
+          addresses.clear();
+          return result;
+        };
+
+        private RetriesExhaustedWithDetailsException makeException() {
+          return new RetriesExhaustedWithDetailsException(exceptions, actions, addresses);
         }
       }
 
+
+
       /**
        * Put the action that has to be retried in the Replay list.
        * @return true if we're out of numRetries and it's the last retry.
@@ -2124,75 +2215,6 @@ public class HConnectionManager {
       }
     }
 
-
-    /**
-     * Executes the given
-     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
-     * callable for each row in the
-     * given list and invokes
-     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
-     * for each result returned.
-     *
-     * @param protocol the protocol interface being called
-     * @param rows a list of row keys for which the callable should be invoked
-     * @param tableName table name for the coprocessor invoked
-     * @param pool ExecutorService used to submit the calls per row
-     * @param callable instance on which to invoke
-     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-     * for each row
-     * @param callback instance on which to invoke
-     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
-     * for each result
-     * @param <T> the protocol interface type
-     * @param <R> the callable's return type
-     * @throws IOException
-     */
-    @Deprecated
-    public <T extends CoprocessorProtocol,R> void processExecs(
-        final Class<T> protocol,
-        List<byte[]> rows,
-        final byte[] tableName,
-        ExecutorService pool,
-        final Batch.Call<T,R> callable,
-        final Batch.Callback<R> callback)
-      throws IOException, Throwable {
-
-      Map<byte[],Future<R>> futures =
-          new TreeMap<byte[],Future<R>>(Bytes.BYTES_COMPARATOR);
-      for (final byte[] r : rows) {
-        final ExecRPCInvoker invoker =
-            new ExecRPCInvoker(conf, this, protocol, tableName, r);
-        Future<R> future = pool.submit(
-            new Callable<R>() {
-              public R call() throws Exception {
-                T instance = (T)Proxy.newProxyInstance(conf.getClassLoader(),
-                    new Class[]{protocol},
-                    invoker);
-                R result = callable.call(instance);
-                byte[] region = invoker.getRegionName();
-                if (callback != null) {
-                  callback.update(region, r, result);
-                }
-                return result;
-              }
-            });
-        futures.put(r, future);
-      }
-      for (Map.Entry<byte[],Future<R>> e : futures.entrySet()) {
-        try {
-          e.getValue().get();
-        } catch (ExecutionException ee) {
-          LOG.warn("Error executing for row "+Bytes.toStringBinary(e.getKey()), ee);
-          throw ee.getCause();
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          throw new IOException("Interrupted executing for row " +
-              Bytes.toStringBinary(e.getKey()), ie);
-        }
-      }
-    }
-
-
     /*
      * Return the number of cached region for a table. It will only be called
      * from a unit test.
@@ -2210,8 +2232,6 @@ public class HConnectionManager {
       }
     }
 
-
-
     /**
      * Check the region cache to see whether a region is cached yet or not.
      * Called by unit tests.
@@ -2252,13 +2272,14 @@ public class HConnectionManager {
             closeZooKeeperWatcher();
           }
         }
-      }else {
+      } else {
         if (t != null) {
           LOG.fatal(msg, t);
         } else {
           LOG.fatal(msg);
         }
         this.aborted = true;
+        close();
         this.closed = true;
       }
     }
@@ -2288,10 +2309,6 @@ public class HConnectionManager {
       }
     }
 
-    public void stopProxyOnClose(boolean stopProxy) {
-      this.stopProxy = stopProxy;
-    }
-
     /**
      * Increment this client's reference count.
      */
@@ -2317,30 +2334,28 @@ public class HConnectionManager {
       return refCount == 0;
     }
 
-    void close(boolean stopProxy) {
+    void internalClose() {
       if (this.closed) {
         return;
       }
       delayedClosing.stop("Closing connection");
-      if (stopProxy) {
-        closeMaster();
-        for (Map<String, VersionedProtocol> i : servers.values()) {
-          for (VersionedProtocol server: i.values()) {
-            HBaseRPC.stopProxy(server);
-          }
-        }
-      }
+      closeMaster();
       closeZooKeeperWatcher();
       this.servers.clear();
+      this.rpcEngine.close();
       this.closed = true;
     }
 
     @Override
     public void close() {
       if (managed) {
-        HConnectionManager.deleteConnection(this, stopProxy, false);
+        if (aborted) {
+          HConnectionManager.deleteStaleConnection(this);
+        } else {
+          HConnectionManager.deleteConnection(this, false);
+        }
       } else {
-        close(true);
+        internalClose();
       }
     }
 
@@ -2427,6 +2442,14 @@ public class HConnectionManager {
       }
       throw new TableNotFoundException(Bytes.toString(tableName));
     }
+
+    /**
+     * Override the RpcClientEngine implementation used by this connection.
+     * <strong>FOR TESTING PURPOSES ONLY!</strong>
+     */
+    void setRpcEngine(RpcClientEngine engine) {
+      this.rpcEngine = engine;
+    }
   }
 
   /**

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java Wed Feb 13 20:58:23 2013
@@ -21,16 +21,15 @@ package org.apache.hadoop.hbase.client;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.lang.reflect.Proxy;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -53,20 +52,17 @@ import org.apache.hadoop.hbase.ServerNam
 import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
 import org.apache.hadoop.hbase.ipc.RegionCoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.LockRowResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiGetRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiGetResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -245,7 +241,6 @@ public class HTable implements HTableInt
 
   /**
    * setup this HTable's parameter based on the passed configuration
-   * @param conf
    */
   private void finishSetup() throws IOException {
     this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
@@ -347,11 +342,10 @@ public class HTable implements HTableInt
   }
 
   /**
-   * Finds the region on which the given row is being served.
+   * Finds the region on which the given row is being served. Does not reload the cache.
    * @param row Row to find.
    * @return Location of the row.
    * @throws IOException if a remote or network exception occurs
-   * @deprecated use {@link #getRegionLocation(byte [], boolean)} instead
    */
   public HRegionLocation getRegionLocation(final byte [] row)
   throws IOException {
@@ -361,8 +355,7 @@ public class HTable implements HTableInt
   /**
    * Finds the region on which the given row is being served.
    * @param row Row to find.
-   * @param reload whether or not to reload information or just use cached
-   * information
+   * @param reload true to reload information or false to use cached information
    * @return Location of the row.
    * @throws IOException if a remote or network exception occurs
    */
@@ -882,6 +875,147 @@ public class HTable implements HTableInt
   }
 
   /**
+   * Goal of this inner class is to keep track of the initial position of a get in a list before
+   * sorting it. This is used to send back results in the same orders we got the Gets before we sort
+   * them.
+   */
+  private static class SortedGet implements Comparable<SortedGet> {
+    protected int initialIndex = -1; // Used to store the get initial index in a list.
+    protected Get get; // Encapsulated Get instance.
+
+    public SortedGet (Get get, int initialIndex) {
+      this.get = get;
+      this.initialIndex = initialIndex;
+    }
+
+    public int getInitialIndex() {
+      return initialIndex;
+    }
+
+    @Override
+    public int compareTo(SortedGet o) {
+      return get.compareTo(o.get);
+    }
+
+    public Get getGet() {
+      return get;
+    }
+
+    @Override
+    public int hashCode() {
+      return get.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj instanceof SortedGet)
+        return get.equals(((SortedGet)obj).get);
+      else
+        return false;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Boolean[] exists(final List<Get> gets) throws IOException {
+    // Prepare the sorted list of gets. Take the list of gets received, and encapsulate them into
+    // a list of SortedGet instances. Simple list parsing, so complexity here is O(n)
+    // The list is later used to recreate the response order based on the order the Gets
+    // got received.
+    ArrayList<SortedGet> sortedGetsList = new ArrayList<HTable.SortedGet>();
+    for (int indexGet = 0; indexGet < gets.size(); indexGet++) {
+      sortedGetsList.add(new SortedGet (gets.get(indexGet), indexGet));
+    }
+
+    // Sorting the list to get the Gets ordered based on the key.
+    Collections.sort(sortedGetsList); // O(n log n)
+
+    // step 1: sort the requests by regions to send them bundled.
+    // Map key is startKey index. Map value is the list of Gets related to the region starting
+    // with the startKey.
+    Map<Integer, List<Get>> getsByRegion = new HashMap<Integer, List<Get>>();
+
+    // Reference map to quickly find back in which region a get belongs.
+    Map<Get, Integer> getToRegionIndexMap = new HashMap<Get, Integer>();
+    Pair<byte[][], byte[][]> startEndKeys = getStartEndKeys();
+
+    int regionIndex = 0;
+    for (final SortedGet get : sortedGetsList) {
+      // Progress on the regions until we find the one the current get resides in.
+      while ((regionIndex < startEndKeys.getSecond().length) && ((Bytes.compareTo(startEndKeys.getSecond()[regionIndex], get.getGet().getRow()) <= 0))) {
+        regionIndex++;
+      }
+      List<Get> regionGets = getsByRegion.get(regionIndex);
+      if (regionGets == null) {
+        regionGets = new ArrayList<Get>();
+        getsByRegion.put(regionIndex, regionGets);
+      }
+      regionGets.add(get.getGet());
+      getToRegionIndexMap.put(get.getGet(), regionIndex);
+    }
+
+    // step 2: make the requests
+    Map<Integer, Future<List<Boolean>>> futures =
+        new HashMap<Integer, Future<List<Boolean>>>(sortedGetsList.size());
+    for (final Map.Entry<Integer, List<Get>> getsByRegionEntry : getsByRegion.entrySet()) {
+      Callable<List<Boolean>> callable = new Callable<List<Boolean>>() {
+        public List<Boolean> call() throws Exception {
+          return new ServerCallable<List<Boolean>>(connection, tableName, getsByRegionEntry.getValue()
+              .get(0).getRow(), operationTimeout) {
+            public List<Boolean> call() throws IOException {
+              try {
+                MultiGetRequest requests = RequestConverter.buildMultiGetRequest(location
+                    .getRegionInfo().getRegionName(), getsByRegionEntry.getValue(), true, false);
+                MultiGetResponse responses = server.multiGet(null, requests);
+                return responses.getExistsList();
+              } catch (ServiceException se) {
+                throw ProtobufUtil.getRemoteException(se);
+              }
+            }
+          }.withRetries();
+        }
+      };
+      futures.put(getsByRegionEntry.getKey(), pool.submit(callable));
+    }
+
+    // step 3: collect the failures and successes
+    Map<Integer, List<Boolean>> responses = new HashMap<Integer, List<Boolean>>();
+    for (final Map.Entry<Integer, List<Get>> sortedGetEntry : getsByRegion.entrySet()) {
+      try {
+        Future<List<Boolean>> future = futures.get(sortedGetEntry.getKey());
+        List<Boolean> resp = future.get();
+
+        if (resp == null) {
+          LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
+        }
+        responses.put(sortedGetEntry.getKey(), resp);
+      } catch (ExecutionException e) {
+        LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
+      } catch (InterruptedException e) {
+        LOG.warn("Failed for gets on region: " + sortedGetEntry.getKey());
+        Thread.currentThread().interrupt();
+      }
+    }
+    Boolean[] results = new Boolean[sortedGetsList.size()];
+
+    // step 4: build the response.
+    Map<Integer, Integer> indexes = new HashMap<Integer, Integer>();
+    for (int i = 0; i < sortedGetsList.size(); i++) {
+      Integer regionInfoIndex = getToRegionIndexMap.get(sortedGetsList.get(i).getGet());
+      Integer index = indexes.get(regionInfoIndex);
+      if (index == null) {
+        index = 0;
+      }
+      results[sortedGetsList.get(i).getInitialIndex()] = responses.get(regionInfoIndex).get(index);
+      indexes.put(regionInfoIndex, index + 1);
+    }
+
+    return results;
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override
@@ -960,7 +1094,7 @@ public class HTable implements HTableInt
   }
 
   // validate for well-formedness
-  private void validatePut(final Put put) throws IllegalArgumentException{
+  public void validatePut(final Put put) throws IllegalArgumentException{
     if (put.isEmpty()) {
       throw new IllegalArgumentException("No columns to insert");
     }
@@ -979,46 +1113,6 @@ public class HTable implements HTableInt
    * {@inheritDoc}
    */
   @Override
-  public RowLock lockRow(final byte [] row)
-  throws IOException {
-    return new ServerCallable<RowLock>(connection, tableName, row, operationTimeout) {
-        public RowLock call() throws IOException {
-          try {
-            LockRowRequest request = RequestConverter.buildLockRowRequest(
-              location.getRegionInfo().getRegionName(), row);
-            LockRowResponse response = server.lockRow(null, request);
-            return new RowLock(row, response.getLockId());
-          } catch (ServiceException se) {
-            throw ProtobufUtil.getRemoteException(se);
-          }
-        }
-      }.withRetries();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void unlockRow(final RowLock rl)
-  throws IOException {
-    new ServerCallable<Boolean>(connection, tableName, rl.getRow(), operationTimeout) {
-        public Boolean call() throws IOException {
-          try {
-            UnlockRowRequest request = RequestConverter.buildUnlockRowRequest(
-              location.getRegionInfo().getRegionName(), rl.getLockId());
-            server.unlockRow(null, request);
-            return Boolean.TRUE;
-          } catch (ServiceException se) {
-            throw ProtobufUtil.getRemoteException(se);
-          }
-        }
-      }.withRetries();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
   public boolean isAutoFlush() {
     return autoFlush;
   }
@@ -1195,22 +1289,6 @@ public class HTable implements HTableInt
   /**
    * {@inheritDoc}
    */
-  @Override
-  @Deprecated
-  public <T extends CoprocessorProtocol> T coprocessorProxy(
-      Class<T> protocol, byte[] row) {
-    return (T)Proxy.newProxyInstance(this.getClass().getClassLoader(),
-        new Class[]{protocol},
-        new ExecRPCInvoker(configuration,
-            connection,
-            protocol,
-            tableName,
-            row));
-  }
-
-  /**
-   * {@inheritDoc}
-   */
   public CoprocessorRpcChannel coprocessorService(byte[] row) {
     return new RegionCoprocessorRpcChannel(connection, tableName, row);
   }
@@ -1219,57 +1297,14 @@ public class HTable implements HTableInt
    * {@inheritDoc}
    */
   @Override
-  @Deprecated
-  public <T extends CoprocessorProtocol, R> Map<byte[],R> coprocessorExec(
-      Class<T> protocol, byte[] startKey, byte[] endKey,
-      Batch.Call<T,R> callable)
-      throws IOException, Throwable {
-
-    final Map<byte[],R> results =  Collections.synchronizedMap(new TreeMap<byte[],R>(
-        Bytes.BYTES_COMPARATOR));
-    coprocessorExec(protocol, startKey, endKey, callable,
-        new Batch.Callback<R>(){
-      public void update(byte[] region, byte[] row, R value) {
-        results.put(region, value);
-      }
-    });
-    return results;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  @Deprecated
-  public <T extends CoprocessorProtocol, R> void coprocessorExec(
-      Class<T> protocol, byte[] startKey, byte[] endKey,
-      Batch.Call<T,R> callable, Batch.Callback<R> callback)
-      throws IOException, Throwable {
-
-    // get regions covered by the row range
-    List<byte[]> keys = getStartKeysInRange(startKey, endKey);
-    connection.processExecs(protocol, keys, tableName, pool, callable,
-        callback);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
   public <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
       byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
       throws ServiceException, Throwable {
-    final Map<byte[],R> results =  new ConcurrentSkipListMap<byte[], R>(Bytes.BYTES_COMPARATOR);
+    final Map<byte[],R> results =  Collections.synchronizedMap(
+        new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
     coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
       public void update(byte[] region, byte[] row, R value) {
-        if (value == null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Call to " + service.getName() +
-                " received NULL value from Batch.Call for region " + Bytes.toStringBinary(region));
-          }
-        } else {
-          results.put(region, value);
-        }
+        results.put(region, value);
       }
     });
     return results;

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java Wed Feb 13 20:58:23 2013
@@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 
 /**
@@ -65,7 +64,7 @@ public interface HTableInterface extends
   HTableDescriptor getTableDescriptor() throws IOException;
 
   /**
-   * Test for the existence of columns in the table, as specified in the Get.
+   * Test for the existence of columns in the table, as specified by the Get.
    * <p>
    *
    * This will return true if the Get matches one or more keys, false if not.
@@ -81,12 +80,29 @@ public interface HTableInterface extends
   boolean exists(Get get) throws IOException;
 
   /**
-   * Method that does a batch call on Deletes, Gets and Puts. The ordering of
-   * execution of the actions is not defined. Meaning if you do a Put and a
+   * Test for the existence of columns in the table, as specified by the Gets.
+   * <p>
+   *
+   * This will return an array of booleans. Each value will be true if the related Get matches
+   * one or more keys, false if not.
+   * <p>
+   *
+   * This is a server-side call so it prevents any data from being transferred to
+   * the client.
+   *
+   * @param gets the Gets
+   * @return Array of Boolean true if the specified Get matches one or more keys, false if not
+   * @throws IOException e
+   */
+  Boolean[] exists(List<Get> gets) throws IOException;
+
+  /**
+   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.
+   * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
    * Get in the same {@link #batch} call, you will not necessarily be
    * guaranteed that the Get returns what the Put had put.
    *
-   * @param actions list of Get, Put, Delete objects
+   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
    * @param results Empty Object[], same size as actions. Provides access to partial
    *                results, in case an exception is thrown. A null in the result array means that
    *                the call for that action failed, even after retries
@@ -99,7 +115,7 @@ public interface HTableInterface extends
    * Same as {@link #batch(List, Object[])}, but returns an array of
    * results instead of using a results parameter reference.
    *
-   * @param actions list of Get, Put, Delete objects
+   * @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
    * @return the results from the actions. A null in the return array means that
    *         the call for that action failed, even after retries
    * @throws IOException
@@ -395,116 +411,6 @@ public interface HTableInterface extends
   void close() throws IOException;
 
   /**
-   * Obtains a lock on a row.
-   *
-   * @param row The row to lock.
-   * @return A {@link RowLock} containing the row and lock id.
-   * @throws IOException if a remote or network exception occurs.
-   * @see RowLock
-   * @see #unlockRow
-   */
-  RowLock lockRow(byte[] row) throws IOException;
-
-  /**
-   * Releases a row lock.
-   *
-   * @param rl The row lock to release.
-   * @throws IOException if a remote or network exception occurs.
-   * @see RowLock
-   * @see #unlockRow
-   */
-  void unlockRow(RowLock rl) throws IOException;
-
-  /**
-   * Creates and returns a proxy to the CoprocessorProtocol instance running in the
-   * region containing the specified row.  The row given does not actually have
-   * to exist.  Whichever region would contain the row based on start and end keys will
-   * be used.  Note that the {@code row} parameter is also not passed to the
-   * coprocessor handler registered for this protocol, unless the {@code row}
-   * is separately passed as an argument in a proxy method call.  The parameter
-   * here is just used to locate the region used to handle the call.
-   *
-   * @param protocol The class or interface defining the remote protocol
-   * @param row The row key used to identify the remote region location
-   * @return A CoprocessorProtocol instance
-   * @deprecated since 0.96.  Use {@link HTableInterface#coprocessorService(byte[])} instead.
-   */
-  @Deprecated
-  <T extends CoprocessorProtocol> T coprocessorProxy(Class<T> protocol, byte[] row);
-
-  /**
-   * Invoke the passed
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} against
-   * the {@link CoprocessorProtocol} instances running in the selected regions.
-   * All regions beginning with the region containing the <code>startKey</code>
-   * row, through to the region containing the <code>endKey</code> row (inclusive)
-   * will be used.  If <code>startKey</code> or <code>endKey</code> is
-   * <code>null</code>, the first and last regions in the table, respectively,
-   * will be used in the range selection.
-   *
-   * @param protocol the CoprocessorProtocol implementation to call
-   * @param startKey start region selection with region containing this row
-   * @param endKey select regions up to and including the region containing
-   * this row
-   * @param callable wraps the CoprocessorProtocol implementation method calls
-   * made per-region
-   * @param <T> CoprocessorProtocol subclass for the remote invocation
-   * @param <R> Return type for the
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-   * method
-   * @return a <code>Map</code> of region names to
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} return values
-   *
-   * @deprecated since 0.96.  Use
-   * {@link HTableInterface#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} instead.
-   */
-  @Deprecated
-  <T extends CoprocessorProtocol, R> Map<byte[],R> coprocessorExec(
-      Class<T> protocol, byte[] startKey, byte[] endKey, Batch.Call<T,R> callable)
-      throws IOException, Throwable;
-
-  /**
-   * Invoke the passed
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call} against
-   * the {@link CoprocessorProtocol} instances running in the selected regions.
-   * All regions beginning with the region containing the <code>startKey</code>
-   * row, through to the region containing the <code>endKey</code> row
-   * (inclusive)
-   * will be used.  If <code>startKey</code> or <code>endKey</code> is
-   * <code>null</code>, the first and last regions in the table, respectively,
-   * will be used in the range selection.
-   *
-   * <p>
-   * For each result, the given
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
-   * method will be called.
-   *</p>
-   *
-   * @param protocol the CoprocessorProtocol implementation to call
-   * @param startKey start region selection with region containing this row
-   * @param endKey select regions up to and including the region containing
-   * this row
-   * @param callable wraps the CoprocessorProtocol implementation method calls
-   * made per-region
-   * @param callback an instance upon which
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} with the
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-   * return value for each region
-   * @param <T> CoprocessorProtocol subclass for the remote invocation
-   * @param <R> Return type for the
-   * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-   * method
-   *
-   * @deprecated since 0.96.
-   * Use {@link HTableInterface#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} instead.
-   */
-  @Deprecated
-  <T extends CoprocessorProtocol, R> void coprocessorExec(
-      Class<T> protocol, byte[] startKey, byte[] endKey,
-      Batch.Call<T,R> callable, Batch.Callback<R> callback)
-      throws IOException, Throwable;
-
-  /**
    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
    * table region containing the specified row.  The row given does not actually have
    * to exist.  Whichever region would contain the row based on start and end keys will

Modified: hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java Wed Feb 13 20:58:23 2013
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HBaseConf
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
-import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PoolMap;
@@ -41,7 +40,7 @@ import org.apache.hadoop.hbase.util.Pool
 
 /**
  * A simple pool of HTable instances.
- * 
+ *
  * Each HTablePool acts as a pool for all tables. To use, instantiate an
  * HTablePool and use {@link #getTable(String)} to get an HTable from the pool.
  *
@@ -51,12 +50,12 @@ import org.apache.hadoop.hbase.util.Pool
  * Once you are done with it, close your instance of {@link HTableInterface}
  * by calling {@link HTableInterface#close()} rather than returning the tables
  * to the pool with (deprecated) {@link #putTable(HTableInterface)}.
- * 
+ *
  * <p>
  * A pool can be created with a <i>maxSize</i> which defines the most HTable
  * references that will ever be retained for each table. Otherwise the default
  * is {@link Integer#MAX_VALUE}.
- * 
+ *
  * <p>
  * Pool will manage its own connections to the cluster. See
  * {@link HConnectionManager}.
@@ -79,7 +78,7 @@ public class HTablePool implements Close
 
   /**
    * Constructor to set maximum versions and use the specified configuration.
-   * 
+   *
    * @param config
    *          configuration
    * @param maxSize
@@ -92,7 +91,7 @@ public class HTablePool implements Close
   /**
    * Constructor to set maximum versions and use the specified configuration and
    * table factory.
-   * 
+   *
    * @param config
    *          configuration
    * @param maxSize
@@ -108,7 +107,7 @@ public class HTablePool implements Close
   /**
    * Constructor to set maximum versions and use the specified configuration and
    * pool type.
-   * 
+   *
    * @param config
    *          configuration
    * @param maxSize
@@ -128,7 +127,7 @@ public class HTablePool implements Close
    * {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}. If the pool
    * type is null or not one of those two values, then it will default to
    * {@link PoolType#Reusable}.
-   * 
+   *
    * @param config
    *          configuration
    * @param maxSize
@@ -168,7 +167,7 @@ public class HTablePool implements Close
    * Get a reference to the specified table from the pool.
    * <p>
    * <p/>
-   * 
+   *
    * @param tableName
    *          table name
    * @return a reference to the specified table
@@ -186,9 +185,9 @@ public class HTablePool implements Close
   /**
    * Get a reference to the specified table from the pool.
    * <p>
-   * 
+   *
    * Create a new one if one is not available.
-   * 
+   *
    * @param tableName
    *          table name
    * @return a reference to the specified table
@@ -206,9 +205,9 @@ public class HTablePool implements Close
   /**
    * Get a reference to the specified table from the pool.
    * <p>
-   * 
+   *
    * Create a new one if one is not available.
-   * 
+   *
    * @param tableName
    *          table name
    * @return a reference to the specified table
@@ -222,7 +221,7 @@ public class HTablePool implements Close
   /**
    * This method is not needed anymore, clients should call
    * HTableInterface.close() rather than returning the tables to the pool
-   * 
+   *
    * @param table
    *          the proxy table user got from pool
    * @deprecated
@@ -248,10 +247,10 @@ public class HTablePool implements Close
   /**
    * Puts the specified HTable back into the pool.
    * <p>
-   * 
+   *
    * If the pool already contains <i>maxSize</i> references to the table, then
    * the table instance gets closed after flushing buffered edits.
-   * 
+   *
    * @param table
    *          table
    */
@@ -279,7 +278,7 @@ public class HTablePool implements Close
    * Note: this is a 'shutdown' of the given table pool and different from
    * {@link #putTable(HTableInterface)}, that is used to return the table
    * instance to the pool for future re-use.
-   * 
+   *
    * @param tableName
    */
   public void closeTablePool(final String tableName) throws IOException {
@@ -294,7 +293,7 @@ public class HTablePool implements Close
 
   /**
    * See {@link #closeTablePool(String)}.
-   * 
+   *
    * @param tableName
    */
   public void closeTablePool(final byte[] tableName) throws IOException {
@@ -314,7 +313,7 @@ public class HTablePool implements Close
     this.tables.clear();
   }
 
-  int getCurrentPoolSize(String tableName) {
+  public int getCurrentPoolSize(String tableName) {
     return tables.size(tableName);
   }
 
@@ -352,6 +351,11 @@ public class HTablePool implements Close
     }
 
     @Override
+    public Boolean[] exists(List<Get> gets) throws IOException {
+      return table.exists(gets);
+    }
+
+    @Override
     public void batch(List<? extends Row> actions, Object[] results) throws IOException,
         InterruptedException {
       table.batch(actions, results);
@@ -457,7 +461,7 @@ public class HTablePool implements Close
 
     /**
      * Returns the actual table back to the pool
-     * 
+     *
      * @throws IOException
      */
     public void close() throws IOException {
@@ -465,37 +469,6 @@ public class HTablePool implements Close
     }
 
     @Override
-    public RowLock lockRow(byte[] row) throws IOException {
-      return table.lockRow(row);
-    }
-
-    @Override
-    public void unlockRow(RowLock rl) throws IOException {
-      table.unlockRow(rl);
-    }
-
-    @Override
-    public <T extends CoprocessorProtocol> T coprocessorProxy(
-        Class<T> protocol, byte[] row) {
-      return table.coprocessorProxy(protocol, row);
-    }
-
-    @Override
-    public <T extends CoprocessorProtocol, R> Map<byte[], R> coprocessorExec(
-        Class<T> protocol, byte[] startKey, byte[] endKey,
-        Batch.Call<T, R> callable) throws IOException, Throwable {
-      return table.coprocessorExec(protocol, startKey, endKey, callable);
-    }
-
-    @Override
-    public <T extends CoprocessorProtocol, R> void coprocessorExec(
-        Class<T> protocol, byte[] startKey, byte[] endKey,
-        Batch.Call<T, R> callable, Batch.Callback<R> callback)
-        throws IOException, Throwable {
-      table.coprocessorExec(protocol, startKey, endKey, callable, callback);
-    }
-
-    @Override
     public CoprocessorRpcChannel coprocessorService(byte[] row) {
       return table.coprocessorService(row);
     }
@@ -521,7 +494,7 @@ public class HTablePool implements Close
 
     /**
      * Expose the wrapped HTable to tests in the same package
-     * 
+     *
      * @return wrapped htable
      */
     HTableInterface getWrappedTable() {



Mime
View raw message