hbase-commits mailing list archives

From: st...@apache.org
Subject: [2/6] hbase git commit: HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)
Date: Tue, 25 Nov 2014 16:20:00 GMT
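For context, HBASE-12404's goal is to stop constructing HBaseAdmin and HTable directly and instead obtain Admin and Table handles from a shared Connection. A minimal sketch of the target pattern, not taken from the patch itself (the table name "t1" is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectionPatternSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Before: new HBaseAdmin(conf) and new HTable(conf, "t1"), each managing
    // its own cluster connection. After: one shared Connection, cheap handles.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      // use admin and table here; all three resources close automatically
    }
  }
}

The individual diffs below apply this pattern to the 0.98/0.99 test code.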
http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 4e6fe59..289741e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -62,7 +64,8 @@ import org.junit.experimental.categories.Category;
 public class TestMasterOperationsForRegionReplicas {
   final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static HBaseAdmin admin;
+  private static Connection CONNECTION = null;
+  private static Admin ADMIN;
   private static int numSlaves = 2;
   private static Configuration conf;
 
@@ -71,14 +74,17 @@ public class TestMasterOperationsForRegionReplicas {
     conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     TEST_UTIL.startMiniCluster(numSlaves);
-    admin = new HBaseAdmin(conf);
-    while(admin.getClusterStatus().getServers().size() < numSlaves) {
+    CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+    ADMIN = CONNECTION.getAdmin();
+    while(ADMIN.getClusterStatus().getServers().size() < numSlaves) {
       Thread.sleep(100);
     }
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    if (ADMIN != null) ADMIN.close();
+    if (CONNECTION != null && !CONNECTION.isClosed()) CONNECTION.close();
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -91,15 +97,15 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
 
-      validateNumberOfRowsInMeta(table, numRegions, admin.getConnection());
+      validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
       List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(
-        admin.getConnection(), table);
+        ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -112,11 +118,11 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
       TEST_UTIL.waitTableEnabled(table);
-      validateNumberOfRowsInMeta(table, numRegions, admin.getConnection());
+      validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
 
-      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table);
+      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
       // check that the master created expected number of RegionState objects
       for (int i = 0; i < numRegions; i++) {
@@ -128,7 +134,7 @@ public class TestMasterOperationsForRegionReplicas {
         }
       }
 
-      List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection());
+      List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection());
       int numRows = 0;
       for (Result result : metaRows) {
         RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
@@ -145,7 +151,7 @@ public class TestMasterOperationsForRegionReplicas {
       // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta
       // class
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now kill the master, restart it and see if the assignments are kept
       ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
@@ -162,7 +168,7 @@ public class TestMasterOperationsForRegionReplicas {
         }
       }
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now shut the whole cluster down, and verify the assignments are kept so that the
       // availability constraints are met.
@@ -170,46 +176,42 @@ public class TestMasterOperationsForRegionReplicas {
       TEST_UTIL.shutdownMiniHBaseCluster();
       TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
       TEST_UTIL.waitTableEnabled(table);
-      admin.close();
-      admin = new HBaseAdmin(conf); 
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now shut the whole cluster down, and verify regions are assigned even if there is only
       // one server running
       TEST_UTIL.shutdownMiniHBaseCluster();
       TEST_UTIL.startMiniHBaseCluster(1, 1);
       TEST_UTIL.waitTableEnabled(table);
-      admin.close();
-      admin = new HBaseAdmin(conf);
-      validateSingleRegionServerAssignment(admin.getConnection(), numRegions, numReplica);
+      validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica);
       for (int i = 1; i < numSlaves; i++) { //restore the cluster
         TEST_UTIL.getMiniHBaseCluster().startRegionServer();
       }
 
       //check on alter table
-      admin.disableTable(table);
-      assert(admin.isTableDisabled(table));
+      ADMIN.disableTable(table);
+      assert(ADMIN.isTableDisabled(table));
       //increase the replica
       desc.setRegionReplication(numReplica + 1);
-      admin.modifyTable(table, desc);
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.modifyTable(table, desc);
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * (numReplica + 1));
 
       //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
-      admin.disableTable(table);
+      ADMIN.disableTable(table);
       desc.setRegionReplication(numReplica);
-      admin.modifyTable(table, desc);
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.modifyTable(table, desc);
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * numReplica);
       //also make sure the meta table has the replica locations removed
-      hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table);
+      hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
       //just check that the number of default replica regions in the meta table are the same
       //as the number of regions the table was created with, and the count of the
@@ -225,8 +227,8 @@ public class TestMasterOperationsForRegionReplicas {
       Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values());
       assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -241,17 +243,17 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
       TEST_UTIL.waitTableEnabled(table);
       Set<byte[]> tableRows = new HashSet<byte[]>();
-      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table);
+      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
       for (HRegionInfo hri : hris) {
         tableRows.add(hri.getRegionName());
       }
-      admin.disableTable(table);
+      ADMIN.disableTable(table);
       // now delete one replica info from all the rows
       // this is to make the meta appear to be only partially updated
-      Table metaTable = new HTable(TableName.META_TABLE_NAME, admin.getConnection());
+      Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection());
       for (byte[] row : tableRows) {
         Delete deleteOneReplicaLocation = new Delete(row);
         deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
@@ -265,14 +267,14 @@ public class TestMasterOperationsForRegionReplicas {
       metaTable.close();
       // even if the meta table is partly updated, when we re-enable the table, we should
       // get back the desired number of replicas for the regions
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * numReplica);
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -286,7 +288,7 @@ public class TestMasterOperationsForRegionReplicas {
 
   private void validateNumberOfRowsInMeta(final TableName table, int numRegions,
       Connection connection) throws IOException {
-    assert(admin.tableExists(table));
+    assert(ADMIN.tableExists(table));
     final AtomicInteger count = new AtomicInteger();
     Visitor visitor = new Visitor() {
       @Override
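The setup/teardown change above caches one Connection and one Admin for the whole test class instead of re-creating HBaseAdmin. A hedged skeleton of that lifecycle, assuming JUnit 4 and a two-slave mini cluster (class name and slave count are illustrative):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class SharedConnectionTestSkeleton {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Connection CONNECTION;
  private static Admin ADMIN;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(2);
    // One connection for the whole class; tests borrow handles from it.
    CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
    ADMIN = CONNECTION.getAdmin();
  }

  // @Test methods go here and share CONNECTION and ADMIN.

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Close child handles before the connection, then stop the mini cluster.
    if (ADMIN != null) ADMIN.close();
    if (CONNECTION != null && !CONNECTION.isClosed()) CONNECTION.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}

Keeping one long-lived Connection also lets the patch drop the admin.close() / new HBaseAdmin(conf) re-creation that used to follow each mini-cluster restart.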

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
index 837b714..39ad442 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
@@ -25,12 +25,17 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -39,6 +44,8 @@ import org.junit.experimental.categories.Category;
 
 @Category({MasterTests.class, LargeTests.class})
 public class TestMasterShutdown {
+  public static final Log LOG = LogFactory.getLog(TestMasterShutdown.class);
+
   /**
    * Simple test of shutdown.
    * <p>
@@ -46,9 +53,8 @@ public class TestMasterShutdown {
    * Verifies that all masters are properly shutdown.
    * @throws Exception
    */
-  @Test (timeout=240000)
+  @Test (timeout=120000)
   public void testMasterShutdown() throws Exception {
-
     final int NUM_MASTERS = 3;
     final int NUM_RS = 3;
 
@@ -56,9 +62,9 @@ public class TestMasterShutdown {
     Configuration conf = HBaseConfiguration.create();
 
     // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HBaseTestingUtility htu = new HBaseTestingUtility(conf);
+    htu.startMiniCluster(NUM_MASTERS, NUM_RS);
+    MiniHBaseCluster cluster = htu.getHBaseCluster();
 
     // get all the master threads
     List<MasterThread> masterThreads = cluster.getMasterThreads();
@@ -84,19 +90,18 @@ public class TestMasterShutdown {
 
     // tell the active master to shutdown the cluster
     active.shutdown();
-    
+
     for (int i = NUM_MASTERS - 1; i >= 0 ;--i) {
       cluster.waitOnMaster(i);
     }
     // make sure all the masters properly shutdown
-    assertEquals(0,masterThreads.size());
-    
-    TEST_UTIL.shutdownMiniCluster();
+    assertEquals(0, masterThreads.size());
+
+    htu.shutdownMiniCluster();
   }
 
-  @Test(timeout = 180000)
+  @Test(timeout = 60000)
   public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
-
     final int NUM_MASTERS = 1;
     final int NUM_RS = 0;
 
@@ -106,25 +111,35 @@ public class TestMasterShutdown {
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
 
     // Start the cluster
-    final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniDFSCluster(3);
-    TEST_UTIL.startMiniZKCluster();
-    TEST_UTIL.createRootDir();
+    final HBaseTestingUtility util = new HBaseTestingUtility(conf);
+    util.startMiniDFSCluster(3);
+    util.startMiniZKCluster();
+    util.createRootDir();
     final LocalHBaseCluster cluster =
         new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,
             MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
-    final MasterThread master = cluster.getMasters().get(0);
+    final int MASTER_INDEX = 0;
+    final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
     master.start();
+    LOG.info("Called master start on " + master.getName());
     Thread shutdownThread = new Thread() {
       public void run() {
+        LOG.info("Before call to shutdown master");
         try {
-          TEST_UTIL.getHBaseAdmin().shutdown();
-          cluster.waitOnMaster(0);
+          try (Connection connection =
+              ConnectionFactory.createConnection(util.getConfiguration())) {
+            try (Admin admin = connection.getAdmin()) {
+              admin.shutdown();
+            }
+          }
+          LOG.info("After call to shutdown master");
+          cluster.waitOnMaster(MASTER_INDEX);
         } catch (Exception e) {
         }
       };
     };
     shutdownThread.start();
+    LOG.info("Called master join on " + master.getName());
     master.join();
     shutdownThread.join();
 
@@ -132,10 +147,8 @@ public class TestMasterShutdown {
     // make sure all the masters properly shutdown
     assertEquals(0, masterThreads.size());
 
-    TEST_UTIL.shutdownMiniZKCluster();
-    TEST_UTIL.shutdownMiniDFSCluster();
-    TEST_UTIL.cleanupTestDir();
+    util.shutdownMiniZKCluster();
+    util.shutdownMiniDFSCluster();
+    util.cleanupTestDir();
   }
-
-}
-
+}
\ No newline at end of file
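In TestMasterShutdown the shutdown thread now builds its own client resources instead of reaching for TEST_UTIL.getHBaseAdmin(). A minimal sketch of that thread body, assuming a Configuration is in scope (class and method names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShutdownViaAdminSketch {
  /** Asks the active master to shut the cluster down, closing client resources either way. */
  static void requestClusterShutdown(Configuration conf) {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Sends the shutdown request to the active master.
      admin.shutdown();
    } catch (Exception e) {
      // The test swallows exceptions here; production code should log or rethrow.
    }
  }
}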

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 74afe7e..5b6f985 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -131,7 +131,7 @@ public class TestRestartCluster {
     // We don't have to use SnapshotOfRegionAssignmentFromMeta.
     // We use it here because AM used to use it to load all user region placements
     SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
-      master.getShortCircuitConnection());
+      master.getConnection());
     snapshot.initialize();
     Map<HRegionInfo, ServerName> regionToRegionServerMap
       = snapshot.getRegionToRegionServerMap();
@@ -197,7 +197,7 @@ public class TestRestartCluster {
       Threads.sleep(100);
     }
 
-    snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
+    snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
     snapshot.initialize();
     Map<HRegionInfo, ServerName> newRegionToRegionServerMap =
       snapshot.getRegionToRegionServerMap();

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
index 76a6db9..53de0a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
@@ -154,7 +154,7 @@ public class TestSplitLogManager {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -162,7 +162,6 @@ public class TestSplitLogManager {
     public MetaTableLocator getMetaTableLocator() {
       return null;
     }
-
   }
 
   static Stoppable stopper = new Stoppable() {
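This test, like several below, implements the Server interface by hand only to return null from the renamed getConnection() accessor (formerly getShortCircuitConnection()). Where a full hand-written stub is not needed, a Mockito mock is one alternative; a sketch under that assumption (Mockito is already imported by some of these tests):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.ClusterConnection;

public class MockServerSketch {
  /** Builds a mocked Server whose getConnection() hands back a mocked ClusterConnection. */
  static Server serverWithMockConnection() {
    ClusterConnection connection = mock(ClusterConnection.class);
    Server server = mock(Server.class);
    // After this patch the accessor is getConnection(); it returns a ClusterConnection.
    when(server.getConnection()).thenReturn(connection);
    return server;
  }
}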

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index a0b479f..b045c72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
@@ -217,7 +217,7 @@ public class TestHFileCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index f4fff62..a004134 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -153,7 +154,7 @@ public class TestHFileLinkCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -183,4 +184,4 @@ public class TestHFileLinkCleaner {
       return false;
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 2400584..4e8ec09 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -26,9 +26,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -160,7 +164,7 @@ public class TestLogsCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -190,6 +194,4 @@ public class TestLogsCleaner {
       return false;
     }
   }
-
-}
-
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
index 84d9155..34239c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hbase.quotas;
 
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -25,20 +28,20 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-
 /**
  * Test the quota table helpers (e.g. CRUD operations)
  */
@@ -47,6 +50,7 @@ public class TestQuotaTableUtil {
   final Log LOG = LogFactory.getLog(getClass());
 
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private Connection connection;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -66,6 +70,16 @@ public class TestQuotaTableUtil {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Before
+  public void before() throws IOException {
+    this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+  }
+
+  @After
+  public void after() throws IOException {
+    this.connection.close();
+  }
+
   @Test
   public void testTableQuotaUtil() throws Exception {
     final TableName table = TableName.valueOf("testTableQuotaUtilTable");
@@ -79,13 +93,13 @@ public class TestQuotaTableUtil {
               .build();
 
     // Add user quota and verify it
-    QuotaUtil.addTableQuota(TEST_UTIL.getConfiguration(), table, quota);
-    Quotas resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table);
+    QuotaUtil.addTableQuota(this.connection, table, quota);
+    Quotas resQuota = QuotaUtil.getTableQuota(this.connection, table);
     assertEquals(quota, resQuota);
 
     // Remove user quota and verify it
-    QuotaUtil.deleteTableQuota(TEST_UTIL.getConfiguration(), table);
-    resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table);
+    QuotaUtil.deleteTableQuota(this.connection, table);
+    resQuota = QuotaUtil.getTableQuota(this.connection, table);
     assertEquals(null, resQuota);
   }
 
@@ -102,13 +116,13 @@ public class TestQuotaTableUtil {
               .build();
 
     // Add user quota and verify it
-    QuotaUtil.addNamespaceQuota(TEST_UTIL.getConfiguration(), namespace, quota);
-    Quotas resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace);
+    QuotaUtil.addNamespaceQuota(this.connection, namespace, quota);
+    Quotas resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
     assertEquals(quota, resQuota);
 
     // Remove user quota and verify it
-    QuotaUtil.deleteNamespaceQuota(TEST_UTIL.getConfiguration(), namespace);
-    resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace);
+    QuotaUtil.deleteNamespaceQuota(this.connection, namespace);
+    resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
     assertEquals(null, resQuota);
   }
 
@@ -139,33 +153,33 @@ public class TestQuotaTableUtil {
               .build();
 
     // Add user global quota
-    QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, quota);
-    Quotas resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user);
+    QuotaUtil.addUserQuota(this.connection, user, quota);
+    Quotas resQuota = QuotaUtil.getUserQuota(this.connection, user);
     assertEquals(quota, resQuota);
 
     // Add user quota for table
-    QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, table, quotaTable);
-    Quotas resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table);
+    QuotaUtil.addUserQuota(this.connection, user, table, quotaTable);
+    Quotas resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
     assertEquals(quotaTable, resQuotaTable);
 
     // Add user quota for namespace
-    QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, namespace, quotaNamespace);
-    Quotas resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace);
+    QuotaUtil.addUserQuota(this.connection, user, namespace, quotaNamespace);
+    Quotas resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
     assertEquals(quotaNamespace, resQuotaNS);
 
     // Delete user global quota
-    QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user);
-    resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user);
+    QuotaUtil.deleteUserQuota(this.connection, user);
+    resQuota = QuotaUtil.getUserQuota(this.connection, user);
     assertEquals(null, resQuota);
 
     // Delete user quota for table
-    QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, table);
-    resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table);
+    QuotaUtil.deleteUserQuota(this.connection, user, table);
+    resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
     assertEquals(null, resQuotaTable);
 
     // Delete user quota for namespace
-    QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, namespace);
-    resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace);
+    QuotaUtil.deleteUserQuota(this.connection, user, namespace);
+    resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
     assertEquals(null, resQuotaNS);
   }
 }
\ No newline at end of file
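TestQuotaTableUtil scopes its Connection per test method rather than per class, and the QuotaUtil helpers now take that Connection instead of a Configuration. A skeleton of the per-method lifecycle, assuming JUnit 4 (class name illustrative):

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.junit.After;
import org.junit.Before;

public class PerTestConnectionSkeleton {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private Connection connection;

  @Before
  public void before() throws IOException {
    // A fresh connection per test keeps tests isolated, at some setup cost.
    this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  }

  @After
  public void after() throws IOException {
    this.connection.close();
  }
}

Passing a Connection into QuotaUtil rather than a Configuration lets the helpers reuse the caller's connection instead of implicitly opening their own, consistent with the parent JIRA's intent.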

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
index 44d3b45..ce2869b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
@@ -57,6 +57,7 @@ public class TestHRegionOnCluster {
   public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
     final int NUM_MASTERS = 1;
     final int NUM_RS = 3;
+    Admin hbaseAdmin = null;
     TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
 
     try {
@@ -68,7 +69,7 @@ public class TestHRegionOnCluster {
       // Create table
       HTableDescriptor desc = new HTableDescriptor(TABLENAME);
       desc.addFamily(new HColumnDescriptor(FAMILY));
-      Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
+      hbaseAdmin = master.getConnection().getAdmin();
       hbaseAdmin.createTable(desc);
 
       assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
@@ -130,6 +131,7 @@ public class TestHRegionOnCluster {
       putDataAndVerify(table, "r4", FAMILY, "v4", 4);
 
     } finally {
+      if (hbaseAdmin != null) hbaseAdmin.close();
       TEST_UTIL.shutdownMiniCluster();
     }
   }
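The Admin in TestHRegionOnCluster is declared before the try and closed behind a null check in the finally block because that block also has to shut the mini cluster down. When nothing else shares the cleanup, try-with-resources is the simpler shape; a hedged sketch (method and parameter names are illustrative):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;

public class AdminFromConnectionSketch {
  /** Borrows an Admin from an existing connection for one check, then closes it. */
  static boolean tableAvailable(ClusterConnection connection, TableName tableName)
      throws IOException {
    try (Admin admin = connection.getAdmin()) {
      return admin.isTableAvailable(tableName);
    }
  }
}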

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index fcfe063..91de97c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
@@ -477,7 +477,7 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 1a14571..80599ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -209,12 +209,12 @@ public class TestRegionMergeTransactionOnCluster {
       table.close();
 
       List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
-          .getTableRegionsAndLocations(master.getShortCircuitConnection(), tableName);
+          .getTableRegionsAndLocations(master.getConnection(), tableName);
       HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
       HTableDescriptor tableDescritor = master.getTableDescriptors().get(
           tableName);
       Result mergedRegionResult = MetaTableAccessor.getRegionResult(
-        master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
+        master.getConnection(), mergedRegionInfo.getRegionName());
 
       // contains merge reference in META
       assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -257,7 +257,7 @@ public class TestRegionMergeTransactionOnCluster {
       assertFalse(fs.exists(regionBdir));
 
       mergedRegionResult = MetaTableAccessor.getRegionResult(
-        master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
+        master.getConnection(), mergedRegionInfo.getRegionName());
       assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
           HConstants.MERGEA_QUALIFIER) != null);
       assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -327,13 +327,13 @@ public class TestRegionMergeTransactionOnCluster {
     createTableAndLoadData(master, tableName, 5, 2);
     List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
         MetaTableAccessor.getTableRegionsAndLocations(
-            master.getShortCircuitConnection(), tableName);
+            master.getConnection(), tableName);
     // Merge 1st and 2nd region
     PairOfSameType<HRegionInfo> mergedRegions = mergeRegionsAndVerifyRegionNum(master, tableName,
         0, 2, 5 * 2 - 2);
     List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
         MetaTableAccessor.getTableRegionsAndLocations(
-            master.getShortCircuitConnection(), tableName);
+            master.getConnection(), tableName);
     List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>();
     for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
       initialRegions.add(p.getFirst());
@@ -373,7 +373,7 @@ public class TestRegionMergeTransactionOnCluster {
       int regionAnum, int regionBnum) throws Exception {
     List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
         .getTableRegionsAndLocations(
-          master.getShortCircuitConnection(), tablename);
+          master.getConnection(), tablename);
     HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
     HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
     TEST_UTIL.getHBaseAdmin().mergeRegions(
@@ -389,7 +389,7 @@ public class TestRegionMergeTransactionOnCluster {
     long timeout = System.currentTimeMillis() + waitTime;
     while (System.currentTimeMillis() < timeout) {
       tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
-        master.getShortCircuitConnection(), tablename);
+        master.getConnection(), tablename);
       tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
           .getRegionsOfTable(tablename);
       if (tableRegionsInMeta.size() == expectedRegionNum
@@ -400,7 +400,7 @@ public class TestRegionMergeTransactionOnCluster {
     }
 
     tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
-      master.getShortCircuitConnection(), tablename);
+      master.getConnection(), tablename);
     LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
     assertEquals(expectedRegionNum, tableRegionsInMeta.size());
   }
@@ -430,14 +430,14 @@ public class TestRegionMergeTransactionOnCluster {
     List<Pair<HRegionInfo, ServerName>> tableRegions;
     while (System.currentTimeMillis() < timeout) {
       tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
-        master.getShortCircuitConnection(), tablename);
+        master.getConnection(), tablename);
       if (tableRegions.size() == numRegions * replication)
         break;
       Thread.sleep(250);
     }
 
     tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
-      master.getShortCircuitConnection(), tablename);
+      master.getConnection(), tablename);
     LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
     assertEquals(numRegions * replication, tableRegions.size());
     return table;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 1ae58f1..44d7464 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -18,10 +18,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertThat;
-import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -36,17 +37,17 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
@@ -128,7 +129,7 @@ public class TestSplitLogWorker {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -136,7 +137,6 @@ public class TestSplitLogWorker {
     public MetaTableLocator getMetaTableLocator() {
       return null;
     }
-
   }
 
   private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 678adc4..858b8cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -634,7 +634,7 @@ public class TestSplitTransactionOnCluster {
       admin.setBalancerRunning(false, true);
       // Turn off the meta scanner so it don't remove parent on us.
       cluster.getMaster().setCatalogJanitorEnabled(false);
-      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
         tableName);
       assertEquals("The specified table should present.", true, tableExists);
       final HRegion region = findSplittableRegion(regions);
@@ -646,7 +646,7 @@ public class TestSplitTransactionOnCluster {
       } catch (IOException e) {
 
       }
-      tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
         tableName);
       assertEquals("The specified table should present.", true, tableExists);
     } finally {
@@ -680,7 +680,7 @@ public class TestSplitTransactionOnCluster {
       admin.setBalancerRunning(false, true);
       // Turn off the meta scanner so it don't remove parent on us.
       cluster.getMaster().setCatalogJanitorEnabled(false);
-      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
           tableName);
       assertEquals("The specified table should be present.", true, tableExists);
       final HRegion region = findSplittableRegion(oldRegions);
@@ -703,7 +703,7 @@ public class TestSplitTransactionOnCluster {
         Thread.sleep(1000);
       } while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1)))
           || newRegions.size() != 4);
-      tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
           tableName);
       assertEquals("The specified table should be present.", true, tableExists);
       // exists works on stale and we see the put after the flush

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index ab5f136..169feba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -32,11 +32,17 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -267,115 +273,127 @@ public class TestPerTableCFReplication {
   @Test(timeout=300000)
   public void testPerTableCFReplication() throws Exception {
     LOG.info("testPerTableCFReplication");
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-
-    new HBaseAdmin(conf1).createTable(tabA);
-    new HBaseAdmin(conf1).createTable(tabB);
-    new HBaseAdmin(conf1).createTable(tabC);
-    new HBaseAdmin(conf2).createTable(tabA);
-    new HBaseAdmin(conf2).createTable(tabB);
-    new HBaseAdmin(conf2).createTable(tabC);
-    new HBaseAdmin(conf3).createTable(tabA);
-    new HBaseAdmin(conf3).createTable(tabB);
-    new HBaseAdmin(conf3).createTable(tabC);
-
-    Table htab1A = new HTable(conf1, tabAName);
-    Table htab2A = new HTable(conf2, tabAName);
-    Table htab3A = new HTable(conf3, tabAName);
-
-    Table htab1B = new HTable(conf1, tabBName);
-    Table htab2B = new HTable(conf2, tabBName);
-    Table htab3B = new HTable(conf3, tabBName);
-
-    Table htab1C = new HTable(conf1, tabCName);
-    Table htab2C = new HTable(conf2, tabCName);
-    Table htab3C = new HTable(conf3, tabCName);
-
-    // A. add cluster2/cluster3 as peers to cluster1
-    admin1.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
-    admin1.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
-
-    // A1. tableA can only replicated to cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f1Name, htab2A);
-    deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f2Name, htab2A);
-    deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f3Name, htab2A);
-    deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-
-    // A2. cf 'f1' of tableB can replicated to both cluster2 and cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-    deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-
-    //  cf 'f2' of tableB can only replicated to cluster3
-    putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row1, f2Name, htab2B);
-    deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-
-    //  cf 'f3' of tableB can only replicated to cluster2
-    putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-    ensureRowNotReplicated(row1, f3Name, htab3B);
-    deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-
-    // A3. tableC can only replicated to cluster2
-    putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f1Name, htab3C);
-    deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f2Name, htab3C);
-    deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f3Name, htab3C);
-    deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-
-    // B. change peers' replicable table-cf config
-    admin1.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
-    admin1.setPeerTableCFs("3", "TB; TC:f3");
-
-    // B1. cf 'f1' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f1Name, htab3A);
-    deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    //     cf 'f2' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f2Name, htab3A);
-    deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    //     cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1A);
-    ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
-    deleteAndWaitWithFamily(row2, f3Name, htab1A);
-
-    // B2. tableB can only replicated to cluster3
-    putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f1Name, htab2B);
-    deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f2Name, htab2B);
-    deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f3Name, htab2B);
-    deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-
-    // B3. cf 'f1' of tableC non-replicable to either cluster
-    putAndWaitWithFamily(row2, f1Name, htab1C);
-    ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f1Name, htab1C);
-    //     cf 'f2' of tableC can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row2, f2Name, htab3C);
-    deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    //     cf 'f3' of tableC can replicated to cluster2 and cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
+    Connection connection1 = ConnectionFactory.createConnection(conf1);
+    Connection connection2 = ConnectionFactory.createConnection(conf2);
+    Connection connection3 = ConnectionFactory.createConnection(conf3);
+    try {
+      Admin admin1 = connection1.getAdmin();
+      Admin admin2 = connection2.getAdmin();
+      Admin admin3 = connection3.getAdmin();
+
+      admin1.createTable(tabA);
+      admin1.createTable(tabB);
+      admin1.createTable(tabC);
+      admin2.createTable(tabA);
+      admin2.createTable(tabB);
+      admin2.createTable(tabC);
+      admin3.createTable(tabA);
+      admin3.createTable(tabB);
+      admin3.createTable(tabC);
+
+      Table htab1A = connection1.getTable(tabAName);
+      Table htab2A = connection2.getTable(tabAName);
+      Table htab3A = connection3.getTable(tabAName);
+
+      Table htab1B = connection1.getTable(tabBName);
+      Table htab2B = connection2.getTable(tabBName);
+      Table htab3B = connection3.getTable(tabBName);
+
+      Table htab1C = connection1.getTable(tabCName);
+      Table htab2C = connection2.getTable(tabCName);
+      Table htab3C = connection3.getTable(tabCName);
+
+      // A. add cluster2/cluster3 as peers to cluster1
+      replicationAdmin.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
+      replicationAdmin.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
+
+      // A1. tableA can only replicated to cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f1Name, htab2A);
+      deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f2Name, htab2A);
+      deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f3Name, htab2A);
+      deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+
+      // A2. cf 'f1' of tableB can replicated to both cluster2 and cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+      deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+
+      //  cf 'f2' of tableB can only replicated to cluster3
+      putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row1, f2Name, htab2B);
+      deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+
+      //  cf 'f3' of tableB can only replicated to cluster2
+      putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+      ensureRowNotReplicated(row1, f3Name, htab3B);
+      deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+
+      // A3. tableC can only replicated to cluster2
+      putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f1Name, htab3C);
+      deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f2Name, htab3C);
+      deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f3Name, htab3C);
+      deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+
+      // B. change peers' replicable table-cf config
+      replicationAdmin.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
+      replicationAdmin.setPeerTableCFs("3", "TB; TC:f3");
+
+      // B1. cf 'f1' of tableA can only replicated to cluster2
+      putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f1Name, htab3A);
+      deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      //     cf 'f2' of tableA can only replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f2Name, htab3A);
+      deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      //     cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1A);
+      ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
+      deleteAndWaitWithFamily(row2, f3Name, htab1A);
+
+      // B2. tableB can only replicated to cluster3
+      putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f1Name, htab2B);
+      deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f2Name, htab2B);
+      deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f3Name, htab2B);
+      deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+
+      // B3. cf 'f1' of tableC non-replicable to either cluster
+      putAndWaitWithFamily(row2, f1Name, htab1C);
+      ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f1Name, htab1C);
+      //     cf 'f2' of tableC can only be replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row2, f2Name, htab3C);
+      deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      //     cf 'f3' of tableC can be replicated to cluster2 and cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    } finally {
+      connection1.close();
+      connection2.close();
+      connection3.close();
+    }
  }
 
   private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException {
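For context, the table-CFs strings passed to addPeer and setPeerTableCFs above take the form "table1:cf1,cf2; table2": a table listed without column families replicates all of its families to that peer. Below is a minimal standalone sketch of the same configuration calls, assuming a source-cluster Configuration and a placeholder peer cluster key (the peer id and table names are illustrative only, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class TableCfsPeerSketch {
  // peerClusterKey is a placeholder, e.g. "zk1,zk2,zk3:2181:/hbase" for the target cluster.
  public static void configurePeer(String peerClusterKey) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
    try {
      // Replicate every column family of TA, but only f1 and f3 of TB, to peer "2".
      replicationAdmin.addPeer("2", peerClusterKey, "TA;TB:f1,f3");
      // Later, narrow the same peer to f1,f2 of TA and f2,f3 of TC.
      replicationAdmin.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
    } finally {
      replicationAdmin.close();
    }
  }
}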

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index 0971d8c..f8060ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
@@ -28,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -38,12 +41,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ReplicationTests.class, MediumTests.class})
@@ -149,7 +149,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -184,5 +184,4 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
       return this.isStopped;
     }
   }
-}
-
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 73a631e..72cbf8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -25,12 +29,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -38,13 +42,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Ignore;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -250,7 +250,7 @@ public class TestReplicationTrackerZKImpl {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -285,5 +285,4 @@ public class TestReplicationTrackerZKImpl {
       return this.isStopped;
     }
   }
-}
-
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index ff6a079..f745f8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -115,7 +117,7 @@ public class TestReplicationSourceManager {
   private static Path oldLogDir;
 
   private static Path logDir;
-  
+
   private static CountDownLatch latch;
 
   private static List<String> files = new ArrayList<String>();
@@ -154,7 +156,7 @@ public class TestReplicationSourceManager {
         HConstants.HREGION_LOGDIR_NAME);
     replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
     manager = replication.getReplicationManager();
-    
+
     manager.addSource(slaveId);
 
     htd = new HTableDescriptor(test);
@@ -243,7 +245,7 @@ public class TestReplicationSourceManager {
 
     // TODO Need a case with only 2 WALs and we only want to delete the first one
   }
-  
+
   @Test
   public void testClaimQueues() throws Exception {
     LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
@@ -286,7 +288,7 @@ public class TestReplicationSourceManager {
     assertEquals(1, populatedMap);
     server.abort("", null);
   }
-  
+
   @Test
   public void testCleanupFailoverQueues() throws Exception {
     final Server server = new DummyServer("hostname1.example.org");
@@ -366,8 +368,8 @@ public class TestReplicationSourceManager {
 
     server.abort("", null);
   }
-  
-  
+
+
   static class DummyNodeFailoverWorker extends Thread {
     private SortedMap<String, SortedSet<String>> logZnodesMap;
     Server server;
@@ -416,7 +418,7 @@ public class TestReplicationSourceManager {
       return 0;
     }
   }
-  
+
   static class DummyServer implements Server {
     String hostname;
 
@@ -443,7 +445,7 @@ public class TestReplicationSourceManager {
       return null;
     }
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -477,6 +479,4 @@ public class TestReplicationSourceManager {
       return false; // To change body of implemented methods use File | Settings | File Templates.
     }
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 76aa656..b1a6ccd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -27,8 +27,6 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +42,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -61,6 +60,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -436,20 +436,19 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testMove() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-    final ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), server, server);
+          hri, server, server);
         return null;
       }
     };
@@ -460,20 +459,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testAssign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -484,20 +480,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testUnassign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), false);
+        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null), hri, false);
         return null;
       }
     };
@@ -508,20 +501,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testRegionOffline() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -922,14 +912,12 @@ public class TestAccessController extends SecureTestUtil {
       //set global read so RegionServer can move it
       setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx"));
 
-      HTable table = new HTable(conf, tableName);
-      try {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
-        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-        loader.doBulkLoad(loadPath, table);
-      } finally {
-        table.close();
+      try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(tableName)) {
+        try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
+          TEST_UTIL.waitTableEnabled(admin, tableName.getName());
+          LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+          loader.doBulkLoad(loadPath, table);
+        }
       }
     }
 
@@ -1988,18 +1976,19 @@ public class TestAccessController extends SecureTestUtil {
     final HRegionServer newRs = newRsThread.getRegionServer();
 
     // Move region to the new RegionServer.
-    final HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE2);
-    try {
-      NavigableMap<HRegionInfo, ServerName> regions = table
-          .getRegionLocations();
-      final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet()
-          .iterator().next();
-
+    List<HRegionLocation> regions;
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE2)) {
+      regions = locator.getAllRegionLocations();
+    }
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
+    try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(TEST_TABLE2)) {
       AccessTestAction moveAction = new AccessTestAction() {
         @Override
         public Object run() throws Exception {
-          admin.move(firstRegion.getKey().getEncodedNameAsBytes(),
-              Bytes.toBytes(newRs.getServerName().getServerName()));
+          admin.move(hri.getEncodedNameAsBytes(),
+            Bytes.toBytes(newRs.getServerName().getServerName()));
           return null;
         }
       };
@@ -2031,8 +2020,6 @@ public class TestAccessController extends SecureTestUtil {
         }
       };
       USER_ADMIN.runAs(putAction);
-    } finally {
-      table.close();
     }
   }
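The hunks in this file all follow the same migration: HTable.getRegionLocations() is replaced by a RegionLocator obtained from the shared Connection. A minimal sketch of the replacement pattern, assuming an existing Connection and TableName (both placeholders supplied by the caller):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorSketch {
  // connection and tableName are assumed to exist; mirrors the test changes above.
  static HRegionLocation firstRegion(Connection connection, TableName tableName)
      throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      List<HRegionLocation> regions = locator.getAllRegionLocations();
      HRegionLocation location = regions.get(0);
      HRegionInfo hri = location.getRegionInfo();   // what getRegionLocations() keyed on
      ServerName server = location.getServerName(); // what getRegionLocations() mapped to
      System.out.println(hri.getEncodedName() + " is on " + server);
      return location;
    }
  }
}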
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index 6fa6cbd..2bde357 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hbase.security.access;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.util.List;
 
@@ -26,7 +28,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -77,7 +80,7 @@ public class TestAccessController2 extends SecureTestUtil {
   @Test
   public void testCreateWithCorrectOwner() throws Exception {
     // Create a test user
-    User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
+    final User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
       new String[0]);
     // Grant the test user the ability to create tables
     SecureTestUtil.grantGlobal(TEST_UTIL, testUser.getShortName(), Action.CREATE);
@@ -86,11 +89,11 @@ public class TestAccessController2 extends SecureTestUtil {
       public Object run() throws Exception {
         HTableDescriptor desc = new HTableDescriptor(TEST_TABLE.getTableName());
         desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
-        Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-        try {
-          admin.createTable(desc);
-        } finally {
-          admin.close();
+        try (Connection connection =
+            ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), testUser)) {
+          try (Admin admin = connection.getAdmin()) {
+            admin.createTable(desc);
+          }
         }
         return null;
       }
@@ -98,7 +101,8 @@ public class TestAccessController2 extends SecureTestUtil {
     TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName());
     // Verify that owner permissions have been granted to the test user on the
     // table just created
-    List<TablePermission> perms = AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
+    List<TablePermission> perms =
+      AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
        .get(testUser.getShortName());
     assertNotNull(perms);
     assertFalse(perms.isEmpty());
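The hunk above swaps direct HBaseAdmin construction for a Connection created on behalf of the test user. A minimal sketch of that pattern, assuming a Configuration, a User, and a prepared HTableDescriptor (all placeholders supplied by the caller):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;

public class CreateTableAsUserSketch {
  // conf, user, and desc are assumed to exist; mirrors the diff above.
  static void createTableAs(Configuration conf, User user, HTableDescriptor desc)
      throws Exception {
    // The connection is bound to the given user, so the create runs with that identity.
    try (Connection connection = ConnectionFactory.createConnection(conf, user)) {
      try (Admin admin = connection.getAdmin()) {
        admin.createTable(desc);
      }
    }
  }
}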

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index ce143ab..6a62071 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.security.token;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@@ -145,7 +145,7 @@ public class TestTokenAuthentication {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -316,7 +316,6 @@ public class TestTokenAuthentication {
     }
   }
 
-
   private static HBaseTestingUtility TEST_UTIL;
   private static TokenServer server;
   private static Thread serverThread;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index ca83eb2..018a417 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
@@ -102,7 +102,7 @@ public class MockServer implements Server {
   }
 
   @Override
-  public HConnection getShortCircuitConnection() {
+  public ClusterConnection getConnection() {
     return null;
   }
 
@@ -121,4 +121,4 @@ public class MockServer implements Server {
     // TODO Auto-generated method stub
     return this.aborted;
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index e756f48..d4a7fdc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -230,14 +230,9 @@ public class TestHBaseFsck {
   @Test(timeout=180000)
   public void testFixAssignmentsWhenMETAinTransition() throws Exception {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    Admin admin = null;
-    try {
-      admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-      admin.closeRegion(cluster.getServerHoldingMeta(),
-          HRegionInfo.FIRST_META_REGIONINFO);
-    } finally {
-      if (admin != null) {
-        admin.close();
+    try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
+      try (Admin admin = connection.getAdmin()) {
+        admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO);
       }
     }
     regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
@@ -391,8 +386,7 @@ public class TestHBaseFsck {
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
     TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS);
-    tbl = new HTable(TEST_UTIL.getConfiguration(), tablename, executorService);
-
+    tbl = (HTable)TEST_UTIL.getConnection().getTable(tablename, executorService);
     List<Put> puts = new ArrayList<Put>();
     for (byte[] row : ROWKEYS) {
       Put p = new Put(row);
@@ -821,8 +815,7 @@ public class TestHBaseFsck {
    */
   @Test
   public void testDegenerateRegions() throws Exception {
-    TableName table =
-        TableName.valueOf("tableDegenerateRegions");
+    TableName table = TableName.valueOf("tableDegenerateRegions");
     try {
       setupTable(table);
       assertNoErrors(doFsck(conf,false));
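Both hunks above move off the deprecated HTable and HBaseAdmin constructors onto a shared Connection. A minimal sketch of the table-access half of that pattern, assuming a long-lived Connection owned by the caller (table, family, and qualifier names are illustrative only):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SharedConnectionTableSketch {
  // connection is assumed to be created once (ConnectionFactory.createConnection)
  // and closed by its owner; only the lightweight Table is opened and closed per use.
  static void putRow(Connection connection, TableName tableName) throws IOException {
    try (Table table = connection.getTable(tableName)) {
      Put p = new Put(Bytes.toBytes("row1"));
      p.add(Bytes.toBytes("fam"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(p);
    }
  }
}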

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index 165cac3..c5aaf90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -40,9 +40,10 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -81,13 +82,14 @@ public class OfflineMetaRebuildTestCore {
   private final static byte[] FAM = Bytes.toBytes("fam");
 
   // for the instance, reset every test run
-  protected HTable htbl;
+  protected Table htbl;
   protected final static byte[][] splits = new byte[][] { Bytes.toBytes("A"),
       Bytes.toBytes("B"), Bytes.toBytes("C") };
 
   private final static String TABLE_BASE = "tableMetaRebuild";
   private static int tableIdx = 0;
   protected TableName table = TableName.valueOf("tableMetaRebuild");
+  protected Connection connection;
 
   @Before
   public void setUpBefore() throws Exception {
@@ -95,6 +97,7 @@ public class OfflineMetaRebuildTestCore {
     TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
     TEST_UTIL.startMiniCluster(3);
     conf = TEST_UTIL.getConfiguration();
+    this.connection = ConnectionFactory.createConnection(conf);
     assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);
 
     // setup the table
@@ -112,8 +115,12 @@ public class OfflineMetaRebuildTestCore {
 
   @After
   public void tearDownAfter() throws Exception {
+    if (this.htbl != null) {
+      this.htbl.close();
+      this.htbl = null;
+    }
+    this.connection.close();
     TEST_UTIL.shutdownMiniCluster();
-    HConnectionManager.deleteConnection(conf);
   }
 
   /**
@@ -123,12 +130,12 @@ public class OfflineMetaRebuildTestCore {
    * @throws InterruptedException
    * @throws KeeperException
    */
-  private HTable setupTable(TableName tablename) throws Exception {
+  private Table setupTable(TableName tablename) throws Exception {
     HTableDescriptor desc = new HTableDescriptor(tablename);
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
     TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
-    return new HTable(TEST_UTIL.getConfiguration(), tablename);
+    return this.connection.getTable(tablename);
   }
 
   private void dumpMeta(HTableDescriptor htd) throws IOException {
@@ -167,14 +174,14 @@ public class OfflineMetaRebuildTestCore {
     }
   }
 
-  protected void deleteRegion(Configuration conf, final HTable tbl,
+  protected void deleteRegion(Configuration conf, final Table tbl,
       byte[] startKey, byte[] endKey) throws IOException {
 
     LOG.info("Before delete:");
     HTableDescriptor htd = tbl.getTableDescriptor();
     dumpMeta(htd);
 
-    Map<HRegionInfo, ServerName> hris = tbl.getRegionLocations();
+    Map<HRegionInfo, ServerName> hris = ((HTable)tbl).getRegionLocations();
     for (Entry<HRegionInfo, ServerName> e : hris.entrySet()) {
       HRegionInfo hri = e.getKey();
       ServerName hsa = e.getValue();
@@ -192,10 +199,10 @@ public class OfflineMetaRebuildTestCore {
             hri.getEncodedName());
         fs.delete(p, true);
 
-        Table meta = new HTable(conf, TableName.META_TABLE_NAME);
-        Delete delete = new Delete(deleteRow);
-        meta.delete(delete);
-        meta.close();
+        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
+          Delete delete = new Delete(deleteRow);
+          meta.delete(delete);
+        }
       }
       LOG.info(hri.toString() + hsa.toString());
     }
@@ -289,4 +296,14 @@ public class OfflineMetaRebuildTestCore {
     meta.close();
     return count;
   }
+
+  protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
+    HTableDescriptor[] htbls = null;
+    try (Connection connection = ConnectionFactory.createConnection(configuration)) {
+      try (Admin admin = connection.getAdmin()) {
+        htbls = admin.listTables();
+      }
+    }
+    return htbls;
+  }
 }

