hbase-commits mailing list archives

From st...@apache.org
Subject [6/6] hbase git commit: HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)
Date Tue, 25 Nov 2014 16:20:04 GMT
HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with
HConnection#getTable (0.98, 0.99)

Replaced HTable under hbase-*/src/main/java. Skipped tests. Would take
till the end of time to do them all, and some cases are cryptic. Also
skipped some mapreduce where HTable comes through in the API. Both sets
of stragglers can be done in another issue.
Generally, if a utility class or standalone class, tried to pass in a
Connection rather than have the utility or standalone create its own
connection on each invocation; e.g. the Quota stuff. Where not possible,
noted where invocation comes from... if test or hbck, didn't worry about
it.
Some classes are just standalone and there is nothing to be done to avoid
a Connection setup per invocation (this is probably how it worked
back in the new-HTable days anyways). Some classes are not used:
AggregationClient, FavoredNodes... we should just purge this stuff.
Added doc on what the short-circuit connection does (you can just use it...
I thought it was only for short-circuiting, but no, it switches depending
on where you are connecting).
Changed HConnection to its subinterface ClusterConnection where safe
(internal usage by private classes only).
Doc cleanup in example usage so we show the new style rather than the old.
Used the Java 7 try-with-resources idiom, which lets you avoid writing out
a finally block to call close on implementations of Closeable.
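
For illustration, a minimal sketch of the idiom combined with the new
Connection#getTable style (the table and row names here are made up):

    // Both Connection and Table are Closeable; try-with-resources closes
    // them without an explicit finally block.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("myTable"))) {
      System.out.println(table.get(new Get(Bytes.toBytes("myRow"))));
    }
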
Added a RegistryFactory; moved it out from being an inner class.
Added a utility createGetClosestRowOrBeforeReverseScan method to Scan
that creates a Scan to run a small reverse scan from a passed row, looking
for the closest row at or before it.
Renamed getShortCircuitConnection as getConnection; users don't need
to know what the implementation does (that it can short-circuit RPC).
The old name gave pause. I was frightened to use it, thinking it was only
for short-circuit reading and that it would not do remote too.
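
As a minimal sketch of a call site after the rename (assuming, per the note
above, that Server#getConnection is the renamed accessor):

    // Same connection as before the rename: it short-circuits RPC only when
    // a request targets the local server, and goes remote otherwise.
    ClusterConnection connection = server.getConnection();
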
Squashed commit of the following:


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6b43007
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6b43007
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6b43007

Branch: refs/heads/master
Commit: e6b4300756b7f09a31ba35cb3baf41d294ed6e14
Parents: f2be914
Author: stack <stack@apache.org>
Authored: Tue Nov 25 08:15:20 2014 -0800
Committer: stack <stack@apache.org>
Committed: Tue Nov 25 08:15:20 2014 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  38 +-
 .../hadoop/hbase/client/ConnectionAdapter.java  |  15 +-
 .../hadoop/hbase/client/ConnectionFactory.java  |   2 +-
 .../hadoop/hbase/client/ConnectionManager.java  |  17 +-
 .../hadoop/hbase/client/ConnectionUtils.java    |   4 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  36 +-
 .../apache/hadoop/hbase/client/MetaScanner.java |  91 +--
 .../apache/hadoop/hbase/client/Registry.java    |   7 +-
 .../hadoop/hbase/client/RegistryFactory.java    |  46 ++
 .../org/apache/hadoop/hbase/client/Scan.java    |  26 +-
 .../client/coprocessor/AggregationClient.java   |  99 +--
 .../hbase/client/coprocessor/package-info.java  |  22 +-
 .../hadoop/hbase/client/package-info.java       | 195 +++---
 .../hadoop/hbase/quotas/QuotaRetriever.java     |  31 +-
 .../hadoop/hbase/quotas/QuotaTableUtil.java     |  47 +-
 .../security/access/AccessControlClient.java    | 121 ++--
 .../security/visibility/VisibilityClient.java   | 181 +++---
 .../apache/hadoop/hbase/LocalHBaseCluster.java  |   6 +-
 .../java/org/apache/hadoop/hbase/Server.java    |  16 +-
 .../backup/example/ZKTableArchiveClient.java    |   6 +-
 .../hadoop/hbase/client/HTableWrapper.java      |  63 +-
 .../hadoop/hbase/mapred/HRegionPartitioner.java |  25 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  18 +-
 .../hadoop/hbase/mapred/TableOutputFormat.java  |  52 +-
 .../DefaultVisibilityExpressionResolver.java    |  74 ++-
 .../hbase/mapreduce/HRegionPartitioner.java     |  18 +-
 .../hadoop/hbase/mapreduce/ImportTsv.java       | 157 ++---
 .../hbase/mapreduce/TableInputFormat.java       |   7 +-
 .../hbase/mapreduce/TableMapReduceUtil.java     |  13 +-
 .../hbase/mapreduce/TableOutputFormat.java      |   1 -
 .../hadoop/hbase/master/AssignmentManager.java  |   6 +-
 .../hadoop/hbase/master/CatalogJanitor.java     |  19 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/RegionStateStore.java   |   4 +-
 .../hadoop/hbase/master/RegionStates.java       |   2 +-
 .../hbase/master/TableNamespaceManager.java     |  11 +-
 .../balancer/FavoredNodeAssignmentHelper.java   |  27 +-
 .../balancer/FavoredNodeLoadBalancer.java       |   2 +-
 .../master/handler/CreateTableHandler.java      |   4 +-
 .../master/handler/DeleteTableHandler.java      |   2 +-
 .../master/handler/DisableTableHandler.java     |   2 +-
 .../master/handler/EnableTableHandler.java      |   4 +-
 .../handler/MetaServerShutdownHandler.java      |   2 +-
 .../master/handler/ModifyTableHandler.java      |  23 +-
 .../hbase/master/handler/TableEventHandler.java |  46 +-
 .../master/handler/TruncateTableHandler.java    |   2 +-
 .../master/snapshot/CloneSnapshotHandler.java   |   2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   2 +-
 .../master/snapshot/RestoreSnapshotHandler.java |   4 +-
 .../hbase/master/snapshot/SnapshotManager.java  |   2 +-
 .../master/snapshot/TakeSnapshotHandler.java    |   2 +-
 .../flush/MasterFlushTableProcedureManager.java |   2 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  54 +-
 .../apache/hadoop/hbase/quotas/QuotaCache.java  |  13 +-
 .../apache/hadoop/hbase/quotas/QuotaUtil.java   | 102 ++-
 .../hbase/regionserver/HRegionServer.java       |  65 +-
 .../regionserver/RegionMergeTransaction.java    |   2 +-
 .../regionserver/ReplicationSyncUp.java         |  14 +-
 .../security/access/AccessControlLists.java     | 156 +++--
 .../hbase/security/access/AccessController.java |   2 +-
 .../hadoop/hbase/security/token/TokenUtil.java  |  31 +-
 .../visibility/VisibilityController.java        |   2 +-
 .../org/apache/hadoop/hbase/tool/Canary.java    | 140 ++--
 .../hadoop/hbase/util/RegionSplitter.java       | 651 ++++++++++---------
 .../apache/hadoop/hbase/wal/WALSplitter.java    |   2 +-
 .../hadoop/hbase/HBaseTestingUtility.java       |  50 +-
 .../apache/hadoop/hbase/MetaMockingUtil.java    |   4 +-
 .../hadoop/hbase/MockRegionServerServices.java  |   6 +-
 .../apache/hadoop/hbase/TestAcidGuarantees.java |   2 +-
 .../org/apache/hadoop/hbase/TestIOFencing.java  |  19 +-
 .../TestZooKeeperTableArchiveClient.java        |  14 +-
 .../hbase/client/HConnectionTestingUtility.java |  23 +-
 .../hbase/client/TestFromClientSide3.java       |  16 +-
 .../TestRegionObserverInterface.java            |   6 +-
 .../coprocessor/TestRegionServerObserver.java   |   2 +-
 .../hbase/io/encoding/TestChangingEncoding.java |  33 +-
 .../hbase/mapred/TestTableMapReduceUtil.java    |   2 -
 .../hadoop/hbase/master/MockRegionServer.java   |  10 +-
 .../hbase/master/TestActiveMasterManager.java   |   7 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  27 +-
 .../hbase/master/TestClockSkewDetection.java    |  12 +-
 .../apache/hadoop/hbase/master/TestMaster.java  |   6 +-
 .../hadoop/hbase/master/TestMasterFailover.java |  15 +-
 .../hbase/master/TestMasterNoCluster.java       |   6 +-
 .../TestMasterOperationsForRegionReplicas.java  |  84 +--
 .../hadoop/hbase/master/TestMasterShutdown.java |  61 +-
 .../hadoop/hbase/master/TestRestartCluster.java |   4 +-
 .../hbase/master/TestSplitLogManager.java       |   5 +-
 .../hbase/master/cleaner/TestHFileCleaner.java  |   4 +-
 .../master/cleaner/TestHFileLinkCleaner.java    |   7 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |  14 +-
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |  62 +-
 .../regionserver/TestHRegionOnCluster.java      |   4 +-
 .../regionserver/TestHeapMemoryManager.java     |   4 +-
 .../TestRegionMergeTransactionOnCluster.java    |  20 +-
 .../hbase/regionserver/TestSplitLogWorker.java  |  14 +-
 .../TestSplitTransactionOnCluster.java          |   8 +-
 .../replication/TestPerTableCFReplication.java  | 242 +++----
 .../replication/TestReplicationStateZKImpl.java |  15 +-
 .../TestReplicationTrackerZKImpl.java           |  19 +-
 .../TestReplicationSourceManager.java           |  20 +-
 .../security/access/TestAccessController.java   | 107 ++-
 .../security/access/TestAccessController2.java  |  22 +-
 .../security/token/TestTokenAuthentication.java |   7 +-
 .../apache/hadoop/hbase/util/MockServer.java    |   6 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |  17 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java   |  39 +-
 .../util/hbck/TestOfflineMetaRebuildBase.java   |  34 +-
 .../util/hbck/TestOfflineMetaRebuildHole.java   |   9 +-
 .../hbck/TestOfflineMetaRebuildOverlap.java     |   6 +-
 111 files changed, 2080 insertions(+), 1868 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3282838..36cc67a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -173,13 +175,18 @@ public class MetaTableAccessor {
    * @throws IOException
    * @SuppressWarnings("deprecation")
    */
-  private static Table getHTable(final Connection connection,
-      final TableName tableName)
+  private static Table getHTable(final Connection connection, final TableName tableName)
   throws IOException {
     // We used to pass whole CatalogTracker in here, now we just pass in Connection
     if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
+    // If the passed in 'connection' is 'managed' -- i.e. every second test uses
+    // an HTable or an HBaseAdmin with managed connections -- then doing
+    // connection.getTable will throw an exception saying you are NOT to use
+    // managed connections getting tables.  Leaving this as it is for now. Will
+    // revisit when inclined to change all tests.  User code probably makes use of
+    // managed connections too so don't change it till post hbase 1.0.
     return new HTable(tableName, connection);
   }
 
@@ -216,8 +223,7 @@ public class MetaTableAccessor {
    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
    */
   @Deprecated
-  public static Pair<HRegionInfo, ServerName> getRegion(
-    Connection connection, byte [] regionName)
+  public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
     throws IOException {
     HRegionLocation location = getRegionLocation(connection, regionName);
     return location == null
@@ -886,12 +892,24 @@ public class MetaTableAccessor {
    * @throws IOException
    */
   public static int getRegionCount(final Configuration c, final TableName tableName)
-      throws IOException {
-    HTable t = new HTable(c, tableName);
-    try {
-      return t.getRegionLocations().size();
-    } finally {
-      t.close();
+  throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(c)) {
+      return getRegionCount(connection, tableName);
+    }
+  }
+
+  /**
+   * Count regions in <code>hbase:meta</code> for passed table.
+   * @param connection Connection object
+   * @param tableName table name to count regions for
+   * @return Count of regions in table <code>tableName</code>
+   * @throws IOException
+   */
+  public static int getRegionCount(final Connection connection, final TableName tableName)
+  throws IOException {
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      List<HRegionLocation> locations = locator.getAllRegionLocations();
+      return locations == null? 0: locations.size();
     }
   }
 

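For illustration, a sketch of calling the new Connection-based overload
above (the table name is hypothetical):

    // Count regions with a caller-supplied Connection; the Configuration
    // overload just wraps this call in a short-lived Connection.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      int regions = MetaTableAccessor.getRegionCount(connection,
          TableName.valueOf("myTable"));
      System.out.println("region count=" + regions);
    }
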
http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 80fa14d..d8856ad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -36,21 +36,18 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 
 /**
- * An internal class that adapts a {@link HConnection}.
- * HConnection is created from HConnectionManager. The default
- * implementation talks to region servers over RPC since it
- * doesn't know if the connection is used by one region server
- * itself. This adapter makes it possible to change some of the
- * default logic. Especially, when the connection is used
- * internally by some the region server.
+ * An internal class that delegates to an {@link HConnection} instance.
+ * A convenience to override when customizing method implementations.
+ * 
  *
  * @see ConnectionUtils#createShortCircuitHConnection(HConnection, ServerName,
- * AdminService.BlockingInterface, ClientService.BlockingInterface)
+ * AdminService.BlockingInterface, ClientService.BlockingInterface) for the case where we make
+ * Connections skip RPC if the request is to the local server.
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 //NOTE: DO NOT make this class public. It was made package-private on purpose.
-class ConnectionAdapter implements ClusterConnection {
+abstract class ConnectionAdapter implements ClusterConnection {
 
   private final ClusterConnection wrappedConnection;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 374ce28..b489af2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
  * A non-instantiable class that manages creation of {@link Connection}s.
  * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
  * the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
  * with {@link Connection#getTable(TableName)}. Example:
  * <pre>
  * Connection connection = ConnectionFactory.createConnection(config);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 147a203..9d38549 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -180,7 +180,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
 /**
- * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
+ * An internal, non-instantiable class that manages creation of {@link HConnection}s.
  */
 @SuppressWarnings("serial")
 @InterfaceAudience.Private
@@ -774,16 +774,7 @@ class ConnectionManager {
      * @throws IOException
      */
     private Registry setupRegistry() throws IOException {
-      String registryClass = this.conf.get("hbase.client.registry.impl",
-        ZooKeeperRegistry.class.getName());
-      Registry registry = null;
-      try {
-        registry = (Registry)Class.forName(registryClass).newInstance();
-      } catch (Throwable t) {
-        throw new IOException(t);
-      }
-      registry.init(this);
-      return registry;
+      return RegistryFactory.getRegistry(this);
     }
 
     /**
@@ -1010,8 +1001,8 @@ class ConnectionManager {
     @Override
     public List<HRegionLocation> locateRegions(final TableName tableName,
         final boolean useCache, final boolean offlined) throws IOException {
-      NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
-          tableName);
+      NavigableMap<HRegionInfo, ServerName> regions =
+        MetaScanner.allTableRegions(conf, this, tableName);
       final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
       for (HRegionInfo regionInfo : regions.keySet()) {
         RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 72b447a..4d6a36c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -104,14 +104,14 @@ public class ConnectionUtils {
 
   /**
    * Adapt a HConnection so that it can bypass the RPC layer (serialization,
-   * deserialization, networking, etc..) when it talks to a local server.
+   * deserialization, networking, etc..) -- i.e. short-circuit -- when talking to a local server.
    * @param conn the connection to adapt
    * @param serverName the local server name
    * @param admin the admin interface of the local server
    * @param client the client interface of the local server
    * @return an adapted/decorated HConnection
    */
-  public static HConnection createShortCircuitHConnection(final Connection conn,
+  public static ClusterConnection createShortCircuitHConnection(final Connection conn,
       final ServerName serverName, final AdminService.BlockingInterface admin,
       final ClientService.BlockingInterface client) {
     return new ConnectionAdapter(conn) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..2c21838 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,24 @@ import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 /**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * <p>An implementation of {@link Table}. Used to communicate with a single HBase table.
+ * An implementation of {@link Table}. Used to communicate with a single HBase table.
  * Lightweight. Get as needed and just close when done.
  * Instances of this class SHOULD NOT be constructed directly.
  * Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
  * class comment for an example of how.
  *
- * <p>This class is NOT thread safe for reads nor write.
+ * <p>This class is NOT thread safe for reads nor writes.
  * In the case of writes (Put, Delete), the underlying write buffer can
  * be corrupted if multiple threads contend over a single HTable instance.
  * In the case of reads, some fields used by a Scan are shared among all threads.
  *
+ * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
+ * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
+ * <a href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">Hadoop
+ * Interface Classification</a>
+ * There are no guarantees for backwards source / binary compatibility and methods or class can
+ * change or go away without deprecation.
+ *
  * @see Table
  * @see Admin
  * @see Connection
@@ -163,8 +163,6 @@ public class HTable implements HTableInterface, RegionLocator {
     this(conf, TableName.valueOf(tableName));
   }
 
-
-
   /**
    * Creates an object to access a HBase table.
    * @param conf Configuration object to use.
@@ -291,6 +289,8 @@ public class HTable implements HTableInterface, RegionLocator {
 
   /**
    * Creates an object to access a HBase table.
+   * Used by HBase internally.  DO NOT USE. See {@link ConnectionFactory} class comment for how to
+   * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
    * @param tableName Name of the table.
    * @param connection HConnection to be used.
    * @param pool ExecutorService to be used.
@@ -1794,20 +1794,6 @@ public class HTable implements HTableInterface, RegionLocator {
   }
 
   /**
-   * Run basic test.
-   * @param args Pass table name and row and will get the content.
-   * @throws IOException
-   */
-  public static void main(String[] args) throws IOException {
-    Table t = new HTable(HBaseConfiguration.create(), args[0]);
-    try {
-      System.out.println(t.get(new Get(Bytes.toBytes(args[1]))));
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
    * {@inheritDoc}
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 5312dfb..e171f4a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Scanner class that contains the <code>hbase:meta</code> table scanning logic.
  * Provided visitors will be called for each row.
@@ -59,13 +61,16 @@ public class MetaScanner {
   /**
    * Scans the meta table and calls a visitor on each RowResult and uses a empty
    * start row value as table name.
+   * 
+   * <p>Visible for testing. Use {@link
+   * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
    *
    * @param configuration conf
    * @param visitor A custom visitor
    * @throws IOException e
    */
-  public static void metaScan(Configuration configuration,
-      MetaScannerVisitor visitor)
+  @VisibleForTesting // Do not use. Used by tests only and hbck.
+  public static void metaScan(Configuration configuration, MetaScannerVisitor visitor)
   throws IOException {
     metaScan(configuration, visitor, null, null, Integer.MAX_VALUE);
   }
@@ -91,6 +96,9 @@ public class MetaScanner {
    * Scans the meta table and calls a visitor on each RowResult. Uses a table
    * name and a row name to locate meta regions. And it only scans at most
    * <code>rowLimit</code> of rows.
+   * 
+   * <p>Visible for testing. Use {@link
+   * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
    *
    * @param configuration HBase configuration.
    * @param visitor Visitor object.
@@ -102,12 +110,12 @@ public class MetaScanner {
    * will be set to default value <code>Integer.MAX_VALUE</code>.
    * @throws IOException e
    */
+  @VisibleForTesting // Do not use. Used by Master, but only by a method that is used for testing.
   public static void metaScan(Configuration configuration,
       MetaScannerVisitor visitor, TableName userTableName, byte[] row,
       int rowLimit)
   throws IOException {
-    metaScan(configuration, null, visitor, userTableName, row, rowLimit,
-      TableName.META_TABLE_NAME);
+    metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME);
   }
 
   /**
@@ -133,7 +141,7 @@ public class MetaScanner {
     throws IOException {
 
     boolean closeConnection = false;
-    if (connection == null){
+    if (connection == null) {
       connection = ConnectionFactory.createConnection(configuration);
       closeConnection = true;
     }
@@ -141,25 +149,24 @@ public class MetaScanner {
     int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
     // Calculate startrow for scan.
     byte[] startRow;
-    ResultScanner scanner = null;
-    HTable metaTable = null;
-    try {
-      metaTable = new HTable(TableName.META_TABLE_NAME, connection, null);
+    // If the passed in 'connection' is 'managed' -- i.e. every second test uses
+    // an HTable or an HBaseAdmin with managed connections -- then doing
+    // connection.getTable will throw an exception saying you are NOT to use
+    // managed connections getting tables.  Leaving this as it is for now. Will
+    // revisit when inclined to change all tests.  User code probably makes use of
+    // managed connections too so don't change it till post hbase 1.0.
+    try (Table metaTable = new HTable(TableName.META_TABLE_NAME, connection, null)) {
       if (row != null) {
         // Scan starting at a particular row in a particular table
-        byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
-
-        Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
-
+        Result startRowResult = getClosestRowOrBefore(metaTable, tableName, row);
         if (startRowResult == null) {
-          throw new TableNotFoundException("Cannot find row in "+ TableName
-              .META_TABLE_NAME.getNameAsString()+" for table: "
-              + tableName + ", row=" + Bytes.toStringBinary(searchRow));
+          throw new TableNotFoundException("Cannot find row in " + metaTable.getName() +
+            " for table: " + tableName + ", row=" + Bytes.toStringBinary(row));
         }
         HRegionInfo regionInfo = getHRegionInfo(startRowResult);
         if (regionInfo == null) {
           throw new IOException("HRegionInfo was null or empty in Meta for " +
-            tableName + ", row=" + Bytes.toStringBinary(searchRow));
+            tableName + ", row=" + Bytes.toStringBinary(row));
         }
         byte[] rowBefore = regionInfo.getStartKey();
         startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false);
@@ -184,25 +191,18 @@ public class MetaScanner {
           Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
       }
       // Run the scan
-      scanner = metaTable.getScanner(scan);
-      Result result;
-      int processedRows = 0;
-      while ((result = scanner.next()) != null) {
-        if (visitor != null) {
-          if (!visitor.processRow(result)) break;
+      try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+        Result result;
+        int processedRows = 0;
+        while ((result = resultScanner.next()) != null) {
+          if (visitor != null) {
+            if (!visitor.processRow(result)) break;
+          }
+          processedRows++;
+          if (processedRows >= rowUpperLimit) break;
         }
-        processedRows++;
-        if (processedRows >= rowUpperLimit) break;
       }
     } finally {
-      if (scanner != null) {
-        try {
-          scanner.close();
-        } catch (Throwable t) {
-          ExceptionUtil.rethrowIfInterrupt(t);
-          LOG.debug("Got exception in closing the result scanner", t);
-        }
-      }
       if (visitor != null) {
         try {
           visitor.close();
@@ -211,21 +211,27 @@ public class MetaScanner {
           LOG.debug("Got exception in closing the meta scanner visitor", t);
         }
       }
-      if (metaTable != null) {
-        try {
-          metaTable.close();
-        } catch (Throwable t) {
-          ExceptionUtil.rethrowIfInterrupt(t);
-          LOG.debug("Got exception in closing meta table", t);
-        }
-      }
       if (closeConnection) {
-        connection.close();
+        if (connection != null) connection.close();
       }
     }
   }
 
   /**
+   * @return The closest metatable region row to the passed <code>row</code>
+   * @throws IOException
+   */
+  private static Result getClosestRowOrBefore(final Table metaTable, final TableName userTableName,
+      final byte [] row)
+  throws IOException {
+    byte[] searchRow = HRegionInfo.createRegionName(userTableName, row, HConstants.NINES, false);
+    Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(searchRow);
+    try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+      return resultScanner.next();
+    }
+  }
+
+  /**
    * Returns HRegionInfo object from the column
    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
    * table Result.
@@ -246,6 +252,7 @@ public class MetaScanner {
    * @return List of all user-space regions.
    * @throws IOException
    */
+  @VisibleForTesting // And for hbck.
   public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
   throws IOException {
     final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index c6ed801..412e4fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Cluster registry.
- * Implemenations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Internal use only.
  */
+@InterfaceAudience.Private
 interface Registry {
   /**
    * @param connection
@@ -47,4 +50,4 @@ interface Registry {
    * @throws IOException
    */
   int getCurrentNrHRS() throws IOException;
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
new file mode 100644
index 0000000..dc2cb7c
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Get instance of configured Registry.
+ */
+@InterfaceAudience.Private
+class RegistryFactory {
+  /**
+   * @return The cluster registry implementation to use.
+   * @throws IOException
+   */
+  static Registry getRegistry(final Connection connection)
+  throws IOException {
+    String registryClass = connection.getConfiguration().get("hbase.client.registry.impl",
+      ZooKeeperRegistry.class.getName());
+    Registry registry = null;
+    try {
+      registry = (Registry)Class.forName(registryClass).newInstance();
+    } catch (Throwable t) {
+      throw new IOException(t);
+    }
+    registry.init(connection);
+    return registry;
+  }
+}
\ No newline at end of file
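
The registry implementation is pluggable via the configuration key read
above; a minimal sketch of overriding it (the custom class name is made up):

    // RegistryFactory instantiates the configured class reflectively, so a
    // custom Registry needs a public no-arg constructor.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.client.registry.impl", "org.example.MyRegistry");
    Connection connection = ConnectionFactory.createConnection(conf);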

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 2aea19f..e4323bf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes;
  * To scan everything for each row, instantiate a Scan object.
  * <p>
  * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
- * If caching is NOT set, we will use the caching value of the hosting {@link HTable}.  See
- * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a
+ * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
+ * In addition to row caching, it is possible to specify a
  * maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
  * single server requests are limited by either number of rows or maximum result size, whichever
  * limit comes first.
@@ -478,7 +478,8 @@ public class Scan extends Query {
 
   /**
    * Set the number of rows for caching that will be passed to scanners.
-   * If not set, the default setting from {@link HTable#getScannerCaching()} will apply.
+   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
+   * apply.
    * Higher caching values will enable faster scanners but will use more memory.
    * @param caching the number of rows for caching
    */
@@ -894,4 +895,21 @@ public class Scan extends Query {
     return (Scan) super.setIsolationLevel(level);
   }
 
-}
+  /**
+   * Utility that creates a Scan that will do a small scan in reverse from the passed row,
+   * looking for the next closest row.
+   * @param row
+   * @return An instance of Scan primed with the passed <code>row</code>, set to
+   * scan in reverse for one row only.
+   */
+  static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
+    // Below does not work if you add in family; need to add the family qualifier that is highest
+    // possible family qualifier.  Do we have such a notion?  Would have to be magic.
+    Scan scan = new Scan(row);
+    scan.setSmall(true);
+    scan.setReversed(true);
+    scan.setCaching(1);
+    return scan;
+  }
+}
\ No newline at end of file
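
The new utility is package-private, but the reverse small scan it sets up
can be sketched against the public Scan API (searchRow and metaTable are
assumed to be in scope):

    // Small reversed scan starting at searchRow and fetching at most one
    // row; the first result is the closest row at or before searchRow,
    // else null.
    Scan scan = new Scan(searchRow);
    scan.setSmall(true);
    scan.setReversed(true);
    scan.setCaching(1);
    try (ResultScanner scanner = metaTable.getScanner(scan)) {
      Result closest = scanner.next();
    }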

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index 85ce4e2..7b7cd16 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.client.coprocessor;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -72,19 +74,32 @@ import com.google.protobuf.Message;
  * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
  * parameter type. For average and std, it returns a double value. For row
  * count, it returns a long value.
+ * <p>Call {@link #close()} when done.
  */
 @InterfaceAudience.Private
-public class AggregationClient {
-
+public class AggregationClient implements Closeable {
+  // TODO: This class is not used.  Move to examples?
   private static final Log log = LogFactory.getLog(AggregationClient.class);
-  Configuration conf;
+  private final Connection connection;
 
   /**
    * Constructor with Conf object
    * @param cfg
    */
   public AggregationClient(Configuration cfg) {
-    this.conf = cfg;
+    try {
+      // Create a connection on construction. Will use it making each of the calls below.
+      this.connection = ConnectionFactory.createConnection(cfg);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (this.connection != null && !this.connection.isClosed()) {
+      this.connection.close();
+    }
   }
 
   /**
@@ -101,15 +116,9 @@ public class AggregationClient {
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> R max(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
-      throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
+  throws Throwable {
+    try (Table table = connection.getTable(tableName)) {
       return max(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
     }
   }
 
@@ -196,15 +205,9 @@ public class AggregationClient {
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> R min(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
-      throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
+  throws Throwable {
+    try (Table table = connection.getTable(tableName)) {
       return min(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
     }
   }
 
@@ -276,15 +279,9 @@ public class AggregationClient {
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
-      throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
-      return rowCount(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
+  throws Throwable {
+    try (Table table = connection.getTable(tableName)) {
+      return rowCount(table, ci, scan);
     }
   }
 
@@ -350,15 +347,9 @@ public class AggregationClient {
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
-      throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
-      return sum(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
+  throws Throwable {
+    try (Table table = connection.getTable(tableName)) {
+      return sum(table, ci, scan);
     }
   }
 
@@ -424,14 +415,8 @@ public class AggregationClient {
   private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
       throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
-      return getAvgArgs(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
+    try (Table table = connection.getTable(tableName)) {
+      return getAvgArgs(table, ci, scan);
     }
   }
 
@@ -615,14 +600,8 @@ public class AggregationClient {
   public <R, S, P extends Message, Q extends Message, T extends Message>
   double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
       Scan scan) throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
-      return std(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
+    try (Table table = connection.getTable(tableName)) {
+      return std(table, ci, scan);
     }
   }
 
@@ -728,14 +707,8 @@ public class AggregationClient {
   public <R, S, P extends Message, Q extends Message, T extends Message>
   R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
       Scan scan) throws Throwable {
-    Table table = null;
-    try {
-      table = new HTable(conf, tableName);
-      return median(table, ci, scan);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
+    try (Table table = connection.getTable(tableName)) {
+      return median(table, ci, scan);
     }
   }
 
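Since AggregationClient now opens a Connection at construction, callers
should close it when done; a sketch of usage (the table name and choice of
interpreter are assumptions, and the aggregation methods throw Throwable):

    // try-with-resources closes the client's internal Connection.
    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      long rows = aggregationClient.rowCount(TableName.valueOf("myTable"),
          new LongColumnInterpreter(), new Scan());
      System.out.println("rows=" + rows);
    }
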

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
index edb3c22..8af120f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
@@ -50,9 +50,9 @@ must:
  method should return a reference to the Endpoint's protocol buffer Service instance.
 </ul>
 Clients may then call the defined service methods on coprocessor instances via
-the {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])},
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
+the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
 methods.
 </p>
 
@@ -65,21 +65,21 @@ to identify which regions should be used for the method invocations.  Clients
 can call coprocessor Service methods against either:
 <ul>
  <li><strong>a single region</strong> - calling
-   {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}
+   {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}
    with a single row key.  This returns a {@link org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel}
    instance which communicates with the region containing the given row key (even if the
    row does not exist) as the RPC endpoint.  Clients can then use the {@code CoprocessorRpcChannel}
    instance in creating a new Service stub to call RPC methods on the region's coprocessor.</li>
  <li><strong>a range of regions</strong> - calling
-   {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
-   or {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
+   {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
+   or {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
    with a starting row key and an ending row key.  All regions in the table
    from the region containing the start row key to the region containing the end
    row key (inclusive), will we used as the RPC endpoints.</li>
 </ul>
 </p>
 
-<p><em>Note that the row keys passed as parameters to the <code>HTable</code>
+<p><em>Note that the row keys passed as parameters to the <code>Table</code>
 methods are not passed directly to the coprocessor Service implementations.
 They are only used to identify the regions for endpoints of the remote calls.
 </em></p>
@@ -160,7 +160,8 @@ use:
 
 <div style="background-color: #cccccc; padding: 2px">
 <blockquote><pre>
-HTable table = new HTable(conf, "mytable");
+Connection connection = ConnectionFactory.createConnection(conf);
+Table table = connection.getTable(TableName.valueOf("mytable"));
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(
     ExampleProtos.RowCountService.class, // the protocol interface we're invoking
@@ -186,7 +187,7 @@ of <code>mytable</code>, keyed by the region name.
 By implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
 as an anonymous class, we can invoke <code>RowCountService</code> methods
 directly against the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-method's argument.  Calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
+method's argument.  Calling {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
 will take care of invoking <code>Batch.Call.call()</code> against our anonymous class
 with the <code>RowCountService</code> instance for each table region.
 </p>
@@ -199,7 +200,8 @@ like to combine row count and key-value count for each region:
 
 <div style="background-color: #cccccc; padding: 2px">
 <blockquote><pre>
-HTable table = new HTable(conf, "mytable");
+Connection connection = ConnectionFactory.createConnection(conf);
+Table table = connection.getTable(TableName.valueOf("mytable"));
 // combine row count and kv count for region
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
index e808904..10261cd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
@@ -28,23 +28,26 @@ Provides HBase Client
 
  <h2><a name="overview">Overview</a></h2>
  <p>To administer HBase, create and drop tables, list and alter tables,
- use {@link org.apache.hadoop.hbase.client.HBaseAdmin}.  Once created, table access is via an instance
- of {@link org.apache.hadoop.hbase.client.HTable}.  You add content to a table a row at a time.  To insert,
- create an instance of a {@link org.apache.hadoop.hbase.client.Put} object.  Specify value, target column
- and optionally a timestamp.  Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}.
- To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}.  The Get can be specified to be broad -- get all
- on a particular row -- or narrow; i.e. return only a single cell value.   After creating an instance of
- Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}.  Use
- {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access.  After
- creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then
- invoke next on the returned object.  Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and
- {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a
+ use {@link org.apache.hadoop.hbase.client.Admin}.  Once created, table access is via an instance
+ of {@link org.apache.hadoop.hbase.client.Table}.  You add content to a table a row at a time.  To
+ insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object.  Specify value,
+ target column and optionally a timestamp.  Commit your update using
+ {@link org.apache.hadoop.hbase.client.Table#put(Put)}.
+ To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}.  The Get can be
+ specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell
+ value.   After creating an instance of
+ Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}.
+
+ <p>Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- cursor-like access.
+ After creating and configuring your Scan instance, call
+ {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then
+ invoke next on the returned object.  Both {@link org.apache.hadoop.hbase.client.Table#get(Get)}
+ and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a
 {@link org.apache.hadoop.hbase.client.Result}.
-A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s.  It has facility for packaging the return
-in different formats.
- Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
+
+<p>Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
  You can remove individual cells or entire families, etc.  Pass it to
- {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute.
+ {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.
  </p>
  <p>Puts, Gets and Deletes take out a lock on the target row for the duration of their operation.
  Concurrent modifications to a single row are serialized.  Gets and scans run concurrently without
@@ -68,8 +71,11 @@ in different formats.
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -87,80 +93,97 @@ public class MyLittleHBaseClient {
     // be found on the CLASSPATH
     Configuration config = HBaseConfiguration.create();
 
-    // This instantiates an HTable object that connects you to
-    // the "myLittleHBaseTable" table.
-    HTable table = new HTable(config, "myLittleHBaseTable");
-
-    // To add to a row, use Put.  A Put constructor takes the name of the row
-    // you want to insert into as a byte array.  In HBase, the Bytes class has
-    // utility for converting all kinds of java types to byte arrays.  In the
-    // below, we are converting the String "myLittleRow" into a byte array to
-    // use as a row key for our update. Once you have a Put instance, you can
-    // adorn it by setting the names of columns you want to update on the row,
-    // the timestamp to use in your update, etc.If no timestamp, the server
-    // applies current time to the edits.
-    Put p = new Put(Bytes.toBytes("myLittleRow"));
-
-    // To set the value you'd like to update in the row 'myLittleRow', specify
-    // the column family, column qualifier, and value of the table cell you'd
-    // like to update.  The column family must already exist in your table
-    // schema.  The qualifier can be anything.  All must be specified as byte
-    // arrays as hbase is all about byte arrays.  Lets pretend the table
-    // 'myLittleHBaseTable' was created with a family 'myLittleFamily'.
-    p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"),
-      Bytes.toBytes("Some Value"));
-
-    // Once you've adorned your Put instance with all the updates you want to
-    // make, to commit it do the following (The HTable#put method takes the
-    // Put instance you've been building and pushes the changes you made into
-    // hbase)
-    table.put(p);
-
-    // Now, to retrieve the data we just wrote. The values that come back are
-    // Result instances. Generally, a Result is an object that will package up
-    // the hbase return into the form you find most palatable.
-    Get g = new Get(Bytes.toBytes("myLittleRow"));
-    Result r = table.get(g);
-    byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"),
-      Bytes.toBytes("someQualifier"));
-    // If we convert the value bytes, we should get back 'Some Value', the
-    // value we inserted at this location.
-    String valueStr = Bytes.toString(value);
-    System.out.println("GET: " + valueStr);
-
-    // Sometimes, you won't know the row you're looking for. In this case, you
-    // use a Scanner. This will give you cursor-like interface to the contents
-    // of the table.  To set up a Scanner, do like you did above making a Put
-    // and a Get, create a Scan.  Adorn it with column names, etc.
-    Scan s = new Scan();
-    s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
-    ResultScanner scanner = table.getScanner(s);
+    // Next you need a Connection to the cluster. Create one. When done with it,
+    // close it (ideally you would start a try/finally right after this creation so
+    // it gets closed for sure, but that is left out here for readability's sake).
+    Connection connection = ConnectionFactory.createConnection(config);
     try {
-      // Scanners return Result instances.
-      // Now, for the actual iteration. One way is to use a while loop like so:
-      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
-        // print out the row we found and the columns we were looking for
-        System.out.println("Found row: " + rr);
-      }
-
-      // The other approach is to use a foreach loop. Scanners are iterable!
-      // for (Result rr : scanner) {
-      //   System.out.println("Found row: " + rr);
-      // }
-    } finally {
-      // Make sure you close your scanners when you are done!
-      // Thats why we have it inside a try/finally clause
-      scanner.close();
-    }
+
+      // This instantiates a Table object that connects you to
+      // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance).
+    // When done with it, close it (again, a try/finally right after this creation would
+    // guarantee the close, but it is left out here for readability's sake).
+      Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"));
+      try {
+
+        // To add to a row, use Put.  A Put constructor takes the name of the row
+        // you want to insert into as a byte array.  In HBase, the Bytes class has
+        // utility for converting all kinds of java types to byte arrays.  In the
+        // below, we are converting the String "myLittleRow" into a byte array to
+        // use as a row key for our update. Once you have a Put instance, you can
+        // adorn it by setting the names of columns you want to update on the row,
+        // the timestamp to use in your update, etc. If no timestamp is given, the server
+        // applies the current time to the edits.
+        Put p = new Put(Bytes.toBytes("myLittleRow"));
+
+        // To set the value you'd like to update in the row 'myLittleRow', specify
+        // the column family, column qualifier, and value of the table cell you'd
+        // like to update.  The column family must already exist in your table
+        // schema.  The qualifier can be anything.  All must be specified as byte
+        // arrays as hbase is all about byte arrays.  Let's pretend the table
+        // 'myLittleHBaseTable' was created with a family 'myLittleFamily'.
+        p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"),
+        Bytes.toBytes("Some Value"));
+
+        // Once you've adorned your Put instance with all the updates you want to
+        // make, to commit it do the following (the Table#put method takes the
+        // Put instance you've been building and pushes the changes you made into
+        // hbase)
+        table.put(p);
+
+        // Now, to retrieve the data we just wrote. The values that come back are
+        // Result instances. Generally, a Result is an object that will package up
+        // the hbase return into the form you find most palatable.
+        Get g = new Get(Bytes.toBytes("myLittleRow"));
+        Result r = table.get(g);
+        byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"),
+          Bytes.toBytes("someQualifier"));
+        // If we convert the value bytes, we should get back 'Some Value', the
+        // value we inserted at this location.
+        String valueStr = Bytes.toString(value);
+        System.out.println("GET: " + valueStr);
+
+        // Sometimes, you won't know the row you're looking for. In this case, you
+        // use a Scanner. This will give you a cursor-like interface to the contents
+        // of the table.  To set up a Scanner, do as you did above making a Put
+        // and a Get: create a Scan.  Adorn it with column names, etc.
+        Scan s = new Scan();
+        s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
+        ResultScanner scanner = table.getScanner(s);
+        try {
+          // Scanners return Result instances.
+          // Now, for the actual iteration. One way is to use a while loop like so:
+          for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
+            // print out the row we found and the columns we were looking for
+            System.out.println("Found row: " + rr);
+          }
+
+          // The other approach is to use a foreach loop. Scanners are iterable!
+          // for (Result rr : scanner) {
+          //   System.out.println("Found row: " + rr);
+          // }
+        } finally {
+          // Make sure you close your scanners when you are done!
+          // That's why we have it inside a try/finally clause.
+          scanner.close();
+        }
+
+        // Close your table and cluster connection.
+      } finally {
+        if (table != null) table.close();
+      }
+    } finally {
+      connection.close();
+    }
   }
 }
 </pre></blockquote>
 </div>
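<p>The nested try/finally blocks above can also be written with the java7
  try-with-resources idiom; a sketch under the same hypothetical names (Connection and
  Table are both Closeable, so they are closed automatically, table before connection):</p>
<blockquote><pre>
try (Connection connection = ConnectionFactory.createConnection(config);
     Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"))) {
  Get g = new Get(Bytes.toBytes("myLittleRow"));
  Result r = table.get(g);
  byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"));
  System.out.println("GET: " + Bytes.toString(value));
}
</pre></blockquote>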
 
 <p>There are many other methods for putting data into and getting data out of
-  HBase, but these examples should get you started. See the HTable javadoc for
+  HBase, but these examples should get you started. See the Table javadoc for
   more methods. Additionally, there are methods for managing tables in the
-  HBaseAdmin class.</p>
+  Admin class.</p>
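+
+<p>For instance, a hypothetical table-existence check through Admin (obtained from the
+  same Connection) might look like:</p>
+<blockquote><pre>
+try (Admin admin = connection.getAdmin()) {
+  if (admin.tableExists(TableName.valueOf("myLittleHBaseTable"))) {
+    System.out.println("table exists");
+  }
+}
+</pre></blockquote>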
 
 <p>If your client is NOT Java, then you should consider the Thrift or REST
   libraries.</p>
@@ -168,20 +191,14 @@ public class MyLittleHBaseClient {
 <h2><a name="related" >Related Documentation</a></h2>
 <ul>
   <li><a href="http://hbase.org">HBase Home Page</a>
-  <li><a href="http://wiki.apache.org/hadoop/Hbase">HBase Wiki</a>
   <li><a href="http://hadoop.apache.org/">Hadoop Home Page</a>
 </ul>
 </pre></code>
 </div>
 
-<p>There are many other methods for putting data into and getting data out of
-  HBase, but these examples should get you started. See the HTable javadoc for
-  more methods. Additionally, there are methods for managing tables in the
-  HBaseAdmin class.</p>
-
   <p>See also the section in the HBase Reference Guide where it discusses
   <a href="http://hbase.apache.org/book.html#client">HBase Client</a>.  It
-  has section on how to access HBase from inside your multithreaded environtment
+  has a section on how to access HBase from inside your multithreaded environment,
   how to control resources consumed client-side, etc.</p>
 </body>
 </html>

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
index f13ce28..68c8e0a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
@@ -30,10 +30,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.util.StringUtils;
 
@@ -47,23 +49,40 @@ public class QuotaRetriever implements Closeable, Iterable<QuotaSettings> {
 
   private final Queue<QuotaSettings> cache = new LinkedList<QuotaSettings>();
   private ResultScanner scanner;
-  private HTable table;
+  /**
+   * Connection to use.
+   * A Connection could be passed in instead, but this class is meant to be standalone,
+   * so it creates (and closes) its own.
+   */
+  private Connection connection;
+  private Table table;
 
   private QuotaRetriever() {
   }
 
   void init(final Configuration conf, final Scan scan) throws IOException {
-    table = new HTable(conf, QuotaTableUtil.QUOTA_TABLE_NAME);
+    this.connection = ConnectionFactory.createConnection(conf);
+    this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
     try {
       scanner = table.getScanner(scan);
     } catch (IOException e) {
-      table.close();
+      try {
+        close();
+      } catch (IOException ioe) {
+        LOG.warn("Failed getting scanner and then failed close on cleanup", ioe);
+      }
       throw e;
     }
   }
 
   public void close() throws IOException {
-    table.close();
+    if (this.table != null) {
+      this.table.close();
+      this.table = null;
+    }
+    if (this.connection != null) {
+      this.connection.close();
+      this.connection = null;
+    }
   }
 
   public QuotaSettings next() throws IOException {
@@ -163,4 +182,4 @@ public class QuotaRetriever implements Closeable, Iterable<QuotaSettings> {
     scanner.init(conf, scan);
     return scanner;
   }
-}
+}
\ No newline at end of file
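For context, a hedged usage sketch of the reworked retriever: it assumes the static
open(Configuration) factory whose tail appears in the last hunk, and relies on
QuotaRetriever being Closeable and Iterable<QuotaSettings> as declared above.

  // Sketch only; QuotaRetriever.open(conf) is assumed from the factory tail above.
  try (QuotaRetriever retriever = QuotaRetriever.open(conf)) {
    for (QuotaSettings settings : retriever) {
      System.out.println(settings);
    }
  }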

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 6153876..0ad81ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -27,15 +27,15 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -78,41 +78,42 @@ public class QuotaTableUtil {
   /* =========================================================================
    *  Quota "settings" helpers
    */
-  public static Quotas getTableQuota(final Configuration conf, final TableName table)
+  public static Quotas getTableQuota(final Connection connection, final TableName table)
       throws IOException {
-    return getQuotas(conf, getTableRowKey(table));
+    return getQuotas(connection, getTableRowKey(table));
   }
 
-  public static Quotas getNamespaceQuota(final Configuration conf, final String namespace)
+  public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
       throws IOException {
-    return getQuotas(conf, getNamespaceRowKey(namespace));
+    return getQuotas(connection, getNamespaceRowKey(namespace));
   }
 
-  public static Quotas getUserQuota(final Configuration conf, final String user)
+  public static Quotas getUserQuota(final Connection connection, final String user)
       throws IOException {
-    return getQuotas(conf, getUserRowKey(user));
+    return getQuotas(connection, getUserRowKey(user));
   }
 
-  public static Quotas getUserQuota(final Configuration conf, final String user,
+  public static Quotas getUserQuota(final Connection connection, final String user,
       final TableName table) throws IOException {
-    return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserTable(table));
+    return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table));
   }
 
-  public static Quotas getUserQuota(final Configuration conf, final String user,
+  public static Quotas getUserQuota(final Connection connection, final String user,
       final String namespace) throws IOException {
-    return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace));
+    return getQuotas(connection, getUserRowKey(user),
+      getSettingsQualifierForUserNamespace(namespace));
   }
 
-  private static Quotas getQuotas(final Configuration conf, final byte[] rowKey)
+  private static Quotas getQuotas(final Connection connection, final byte[] rowKey)
       throws IOException {
-    return getQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS);
+    return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS);
   }
 
-  private static Quotas getQuotas(final Configuration conf, final byte[] rowKey,
+  private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
       final byte[] qualifier) throws IOException {
     Get get = new Get(rowKey);
     get.addColumn(QUOTA_FAMILY_INFO, qualifier);
-    Result result = doGet(conf, get);
+    Result result = doGet(connection, get);
     if (result.isEmpty()) {
       return null;
     }
@@ -321,23 +322,17 @@ public class QuotaTableUtil {
   /* =========================================================================
    *  HTable helpers
    */
-  protected static Result doGet(final Configuration conf, final Get get)
+  protected static Result doGet(final Connection connection, final Get get)
       throws IOException {
-    HTable table = new HTable(conf, QUOTA_TABLE_NAME);
-    try {
+    try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
       return table.get(get);
-    } finally {
-      table.close();
     }
   }
 
-  protected static Result[] doGet(final Configuration conf, final List<Get> gets)
+  protected static Result[] doGet(final Connection connection, final List<Get> gets)
       throws IOException {
-    HTable table = new HTable(conf, QUOTA_TABLE_NAME);
-    try {
+    try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
       return table.get(gets);
-    } finally {
-      table.close();
     }
   }
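With the Configuration parameter swapped for a Connection, callers now own the
connection lifecycle; a minimal, hypothetical caller might look like:

  // Hypothetical caller; the table name "t1" is made up.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    Quotas quota = QuotaTableUtil.getTableQuota(connection, TableName.valueOf("t1"));
    if (quota != null) {
      System.out.println(quota);
    }
  }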
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index 922bf67..ae43c17 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -50,11 +50,7 @@ public class AccessControlClient {
   public static final TableName ACL_TABLE_NAME =
       TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
 
-  private static HTable getAclTable(Configuration conf) throws IOException {
-    return new HTable(conf, ACL_TABLE_NAME);
-  }
-
-  private static BlockingInterface getAccessControlServiceStub(HTable ht)
+  private static BlockingInterface getAccessControlServiceStub(Table ht)
       throws IOException {
     CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW);
     BlockingInterface protocol =
@@ -75,14 +71,12 @@ public class AccessControlClient {
   public static void grant(Configuration conf, final TableName tableName,
       final String userName, final byte[] family, final byte[] qual,
       final Permission.Action... actions) throws Throwable {
-    HTable ht = null;
-    try {
-      ht = getAclTable(conf);
-      ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, tableName, family, qual,
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual,
           actions);
-    } finally {
-      if (ht != null) {
-        ht.close();
       }
     }
   }
@@ -97,26 +91,22 @@ public class AccessControlClient {
    */
   public static void grant(Configuration conf, final String namespace,
       final String userName, final Permission.Action... actions) throws Throwable {
-    HTable ht = null;
-    try {
-      ht = getAclTable(conf);
-      ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, namespace, actions);
-    } finally {
-      if (ht != null) {
-        ht.close();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions);
       }
     }
   }
 
   public static boolean isAccessControllerRunning(Configuration conf)
       throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
-    HBaseAdmin ha = null;
-    try {
-      ha = new HBaseAdmin(conf);
-      return ha.isTableAvailable(ACL_TABLE_NAME);
-    } finally {
-      if (ha != null) {
-        ha.close();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Admin admin = connection.getAdmin()) {
+        return admin.isTableAvailable(ACL_TABLE_NAME);
       }
     }
   }
@@ -134,14 +124,12 @@ public class AccessControlClient {
   public static void revoke(Configuration conf, final TableName tableName,
       final String username, final byte[] family, final byte[] qualifier,
       final Permission.Action... actions) throws Throwable {
-    HTable ht = null;
-    try {
-      ht = getAclTable(conf);
-      ProtobufUtil.revoke(getAccessControlServiceStub(ht), username, tableName, family, qualifier,
-          actions);
-    } finally {
-      if (ht != null) {
-        ht.close();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family,
+          qualifier, actions);
       }
     }
   }
@@ -156,13 +144,11 @@ public class AccessControlClient {
    */
   public static void revoke(Configuration conf, final String namespace,
     final String userName, final Permission.Action... actions) throws Throwable {
-    HTable ht = null;
-    try {
-      ht = getAclTable(conf);
-      ProtobufUtil.revoke(getAccessControlServiceStub(ht), userName, namespace, actions);
-    } finally {
-      if (ht != null) {
-        ht.close();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions);
       }
     }
   }
@@ -177,36 +163,29 @@ public class AccessControlClient {
   public static List<UserPermission> getUserPermissions(Configuration conf, String tableRegex)
       throws Throwable {
     List<UserPermission> permList = new ArrayList<UserPermission>();
-    Table ht = null;
-    Admin ha = null;
-    try {
-      ha = new HBaseAdmin(conf);
-      ht = new HTable(conf, ACL_TABLE_NAME);
-      CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW);
-      BlockingInterface protocol = AccessControlProtos.AccessControlService
-          .newBlockingStub(service);
-      HTableDescriptor[] htds = null;
-
-      if (tableRegex == null || tableRegex.isEmpty()) {
-        permList = ProtobufUtil.getUserPermissions(protocol);
-      } else if (tableRegex.charAt(0) == '@') {
-        String namespace = tableRegex.substring(1);
-        permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace));
-      } else {
-        htds = ha.listTables(Pattern.compile(tableRegex));
-        for (HTableDescriptor hd : htds) {
-          permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName()));
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time.  This class is only used in tests and the shell at the moment though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        try (Admin admin = connection.getAdmin()) {
+          CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
+          BlockingInterface protocol =
+            AccessControlProtos.AccessControlService.newBlockingStub(service);
+          HTableDescriptor[] htds = null;
+          if (tableRegex == null || tableRegex.isEmpty()) {
+            permList = ProtobufUtil.getUserPermissions(protocol);
+          } else if (tableRegex.charAt(0) == '@') {
+            String namespace = tableRegex.substring(1);
+            permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace));
+          } else {
+            htds = admin.listTables(Pattern.compile(tableRegex));
+            for (HTableDescriptor hd : htds) {
+              permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName()));
+            }
+          }
         }
       }
-    } finally {
-      if (ht != null) {
-        ht.close();
-      }
-      if (ha != null) {
-        ha.close();
-      }
     }
     return permList;
   }
-
-}
+}
\ No newline at end of file
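A hedged caller sketch for the grant API above (the user and table names are invented,
and note the method throws Throwable):

  // Hypothetical caller; "bob" and "myTable" are made-up names. Passing null for
  // family and qualifier leaves the grant scoped to the whole table.
  Configuration conf = HBaseConfiguration.create();
  AccessControlClient.grant(conf, TableName.valueOf("myTable"), "bob",
    null, null, Permission.Action.READ, Permission.Action.WRITE);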

