hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject [2/6] hbase git commit: HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)
Date Tue, 25 Nov 2014 20:27:28 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index a1a10fd..8a90f48 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -44,14 +44,14 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -378,8 +378,8 @@ public class TestMasterFailover {
 
     // Regions of table of merging regions
     // Cause: Master was down while merging was going on
-    ((BaseCoordinatedStateManager) hrs.getCoordinatedStateManager())
-      .getRegionMergeCoordination().startRegionMergeTransaction(newRegion, mergingServer, a, b);
+    hrs.getCoordinatedStateManager().
+      getRegionMergeCoordination().startRegionMergeTransaction(newRegion, mergingServer, a, b);
 
     /*
      * ZK = NONE
@@ -1205,8 +1205,8 @@ public class TestMasterFailover {
     assertTrue(master.isInitialized());
 
     // Create a table with a region online
-    HTable onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family");
-
+    Table onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family");
+    onlineTable.close();
     // Create a table in META, so it has a region offline
     HTableDescriptor offlineTable = new HTableDescriptor(
       TableName.valueOf(Bytes.toBytes("offlineTable")));
@@ -1219,16 +1219,18 @@ public class TestMasterFailover {
 
     HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
     createRegion(hriOffline, rootdir, conf, offlineTable);
-    MetaTableAccessor.addRegionToMeta(master.getShortCircuitConnection(), hriOffline);
+    MetaTableAccessor.addRegionToMeta(master.getConnection(), hriOffline);
 
     log("Regions in hbase:meta and namespace have been created");
 
     // at this point we only expect 3 regions to be assigned out
     // (catalogs and namespace, + 1 online region)
     assertEquals(3, cluster.countServedRegions());
-    HRegionInfo hriOnline = onlineTable.getRegionLocation(
-      HConstants.EMPTY_START_ROW).getRegionInfo();
-
+    HRegionInfo hriOnline = null;
+    try (RegionLocator locator =
+        TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))) {
+      hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+    }
     RegionStates regionStates = master.getAssignmentManager().getRegionStates();
     RegionStateStore stateStore = master.getAssignmentManager().getRegionStateStore();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index de7b8f8..430673a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -206,7 +206,7 @@ public class TestMasterNoCluster {
       }
 
       @Override
-      public HConnection getShortCircuitConnection() {
+      public ClusterConnection getConnection() {
         // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
         // the conf from the master; the conf will already have an HConnection
        // associated, so the below mocking of a connection will fail.
@@ -287,7 +287,7 @@ public class TestMasterNoCluster {
       }
 
       @Override
-      public HConnection getShortCircuitConnection() {
+      public ClusterConnection getConnection() {
         // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
         // the conf from the master; the conf will already have an HConnection
        // associated, so the below mocking of a connection will fail.

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 746fcd0..a82cc2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -44,7 +44,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -61,7 +63,8 @@ import org.junit.experimental.categories.Category;
 public class TestMasterOperationsForRegionReplicas {
   final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static HBaseAdmin admin;
+  private static Connection CONNECTION = null;
+  private static Admin ADMIN;
   private static int numSlaves = 2;
   private static Configuration conf;
 
@@ -70,14 +73,17 @@ public class TestMasterOperationsForRegionReplicas {
     conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
     TEST_UTIL.startMiniCluster(numSlaves);
-    admin = new HBaseAdmin(conf);
-    while(admin.getClusterStatus().getServers().size() < numSlaves) {
+    CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+    ADMIN = CONNECTION.getAdmin();
+    while(ADMIN.getClusterStatus().getServers().size() < numSlaves) {
       Thread.sleep(100);
     }
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    if (ADMIN != null) ADMIN.close();
+    if (CONNECTION != null && !CONNECTION.isClosed()) CONNECTION.close();
     TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -90,15 +96,15 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
 
-      validateNumberOfRowsInMeta(table, numRegions, admin.getConnection());
+      validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
       List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
-        admin.getConnection(), table);
+        ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -111,12 +117,12 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
       TEST_UTIL.waitTableEnabled(table);
-      validateNumberOfRowsInMeta(table, numRegions, admin.getConnection());
+      validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
 
-      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
-        admin.getConnection(), table);
+      List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(
+        TEST_UTIL.getZooKeeperWatcher(), ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
       // check that the master created expected number of RegionState objects
       for (int i = 0; i < numRegions; i++) {
@@ -128,7 +134,7 @@ public class TestMasterOperationsForRegionReplicas {
         }
       }
 
-      List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection());
+      List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection());
       int numRows = 0;
       for (Result result : metaRows) {
         RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
@@ -145,7 +151,7 @@ public class TestMasterOperationsForRegionReplicas {
       // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta
       // class
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now kill the master, restart it and see if the assignments are kept
       ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
@@ -162,7 +168,7 @@ public class TestMasterOperationsForRegionReplicas {
         }
       }
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now shut the whole cluster down, and verify the assignments are kept so that the
       // availability constraints are met.
@@ -170,47 +176,43 @@ public class TestMasterOperationsForRegionReplicas {
       TEST_UTIL.shutdownMiniHBaseCluster();
       TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
       TEST_UTIL.waitTableEnabled(table);
-      admin.close();
-      admin = new HBaseAdmin(conf); 
       validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
-        admin.getConnection());
+        ADMIN.getConnection());
 
       // Now shut the whole cluster down, and verify regions are assigned even if there is only
       // one server running
       TEST_UTIL.shutdownMiniHBaseCluster();
       TEST_UTIL.startMiniHBaseCluster(1, 1);
       TEST_UTIL.waitTableEnabled(table);
-      admin.close();
-      admin = new HBaseAdmin(conf);
-      validateSingleRegionServerAssignment(admin.getConnection(), numRegions, numReplica);
+      validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica);
       for (int i = 1; i < numSlaves; i++) { //restore the cluster
         TEST_UTIL.getMiniHBaseCluster().startRegionServer();
       }
 
       //check on alter table
-      admin.disableTable(table);
-      assert(admin.isTableDisabled(table));
+      ADMIN.disableTable(table);
+      assert(ADMIN.isTableDisabled(table));
       //increase the replica
       desc.setRegionReplication(numReplica + 1);
-      admin.modifyTable(table, desc);
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.modifyTable(table, desc);
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * (numReplica + 1));
 
       //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
-      admin.disableTable(table);
+      ADMIN.disableTable(table);
       desc.setRegionReplication(numReplica);
-      admin.modifyTable(table, desc);
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.modifyTable(table, desc);
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * numReplica);
       //also make sure the meta table has the replica locations removed
       hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
-        admin.getConnection(), table);
+        ADMIN.getConnection(), table);
       assert(hris.size() == numRegions * numReplica);
       //just check that the number of default replica regions in the meta table are the same
       //as the number of regions the table was created with, and the count of the
@@ -226,8 +228,8 @@ public class TestMasterOperationsForRegionReplicas {
       Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values());
       assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -242,18 +244,18 @@ public class TestMasterOperationsForRegionReplicas {
       HTableDescriptor desc = new HTableDescriptor(table);
       desc.setRegionReplication(numReplica);
       desc.addFamily(new HColumnDescriptor("family"));
-      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
+      ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
       TEST_UTIL.waitTableEnabled(table);
       Set<byte[]> tableRows = new HashSet<byte[]>();
       List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
-        admin.getConnection(), table);
+        ADMIN.getConnection(), table);
       for (HRegionInfo hri : hris) {
         tableRows.add(hri.getRegionName());
       }
-      admin.disableTable(table);
+      ADMIN.disableTable(table);
       // now delete one replica info from all the rows
       // this is to make the meta appear to be only partially updated
-      Table metaTable = new HTable(TableName.META_TABLE_NAME, admin.getConnection());
+      Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection());
       for (byte[] row : tableRows) {
         Delete deleteOneReplicaLocation = new Delete(row);
         deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
@@ -267,14 +269,14 @@ public class TestMasterOperationsForRegionReplicas {
       metaTable.close();
       // even if the meta table is partly updated, when we re-enable the table, we should
       // get back the desired number of replicas for the regions
-      admin.enableTable(table);
-      assert(admin.isTableEnabled(table));
+      ADMIN.enableTable(table);
+      assert(ADMIN.isTableEnabled(table));
       List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
           .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
       assert(regions.size() == numRegions * numReplica);
     } finally {
-      admin.disableTable(table);
-      admin.deleteTable(table);
+      ADMIN.disableTable(table);
+      ADMIN.deleteTable(table);
     }
   }
 
@@ -288,7 +290,7 @@ public class TestMasterOperationsForRegionReplicas {
 
   private void validateNumberOfRowsInMeta(final TableName table, int numRegions,
       Connection connection) throws IOException {
-    assert(admin.tableExists(table));
+    assert(ADMIN.tableExists(table));
     final AtomicInteger count = new AtomicInteger();
     Visitor visitor = new Visitor() {
       @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
index 9a3ec91..711e5d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -32,12 +34,18 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(LargeTests.class)
 public class TestMasterShutdown {
+  public static final Log LOG = LogFactory.getLog(TestMasterShutdown.class);
+
   /**
    * Simple test of shutdown.
    * <p>
@@ -45,9 +53,8 @@ public class TestMasterShutdown {
    * Verifies that all masters are properly shutdown.
    * @throws Exception
    */
-  @Test (timeout=240000)
+  @Test (timeout=120000)
   public void testMasterShutdown() throws Exception {
-
     final int NUM_MASTERS = 3;
     final int NUM_RS = 3;
 
@@ -55,9 +62,9 @@ public class TestMasterShutdown {
     Configuration conf = HBaseConfiguration.create();
 
     // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HBaseTestingUtility htu = new HBaseTestingUtility(conf);
+    htu.startMiniCluster(NUM_MASTERS, NUM_RS);
+    MiniHBaseCluster cluster = htu.getHBaseCluster();
 
     // get all the master threads
     List<MasterThread> masterThreads = cluster.getMasterThreads();
@@ -83,19 +90,18 @@ public class TestMasterShutdown {
 
     // tell the active master to shutdown the cluster
     active.shutdown();
-    
+
     for (int i = NUM_MASTERS - 1; i >= 0 ;--i) {
       cluster.waitOnMaster(i);
     }
     // make sure all the masters properly shutdown
-    assertEquals(0,masterThreads.size());
-    
-    TEST_UTIL.shutdownMiniCluster();
+    assertEquals(0, masterThreads.size());
+
+    htu.shutdownMiniCluster();
   }
 
-  @Test(timeout = 180000)
+  @Test(timeout = 60000)
   public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
-
     final int NUM_MASTERS = 1;
     final int NUM_RS = 0;
 
@@ -105,25 +111,35 @@ public class TestMasterShutdown {
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
 
     // Start the cluster
-    final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniDFSCluster(3);
-    TEST_UTIL.startMiniZKCluster();
-    TEST_UTIL.createRootDir();
+    final HBaseTestingUtility util = new HBaseTestingUtility(conf);
+    util.startMiniDFSCluster(3);
+    util.startMiniZKCluster();
+    util.createRootDir();
     final LocalHBaseCluster cluster =
         new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,
             MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
-    final MasterThread master = cluster.getMasters().get(0);
+    final int MASTER_INDEX = 0;
+    final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
     master.start();
+    LOG.info("Called master start on " + master.getName());
     Thread shutdownThread = new Thread() {
       public void run() {
+        LOG.info("Before call to shutdown master");
         try {
-          TEST_UTIL.getHBaseAdmin().shutdown();
-          cluster.waitOnMaster(0);
+          try (Connection connection =
+              ConnectionFactory.createConnection(util.getConfiguration())) {
+            try (Admin admin = connection.getAdmin()) {
+              admin.shutdown();
+            }
+          }
+          LOG.info("After call to shutdown master");
+          cluster.waitOnMaster(MASTER_INDEX);
         } catch (Exception e) {
         }
       };
     };
     shutdownThread.start();
+    LOG.info("Called master join on " + master.getName());
     master.join();
     shutdownThread.join();
 
@@ -131,10 +147,8 @@ public class TestMasterShutdown {
     // make sure all the masters properly shutdown
     assertEquals(0, masterThreads.size());
 
-    TEST_UTIL.shutdownMiniZKCluster();
-    TEST_UTIL.shutdownMiniDFSCluster();
-    TEST_UTIL.cleanupTestDir();
+    util.shutdownMiniZKCluster();
+    util.shutdownMiniDFSCluster();
+    util.cleanupTestDir();
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 3a33be8..6dde236 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -159,7 +159,7 @@ public class TestRestartCluster {
     // We don't have to use SnapshotOfRegionAssignmentFromMeta.
     // We use it here because AM used to use it to load all user region placements
     SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
-      master.getShortCircuitConnection());
+      master.getConnection());
     snapshot.initialize();
     Map<HRegionInfo, ServerName> regionToRegionServerMap
       = snapshot.getRegionToRegionServerMap();
@@ -224,7 +224,7 @@ public class TestRestartCluster {
       Threads.sleep(100);
     }
 
-    snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
+    snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
     snapshot.initialize();
     Map<HRegionInfo, ServerName> newRegionToRegionServerMap =
       snapshot.getRegionToRegionServerMap();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
index 0e10ee7..7db9a50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
@@ -153,7 +153,7 @@ public class TestSplitLogManager {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -161,7 +161,6 @@ public class TestSplitLogManager {
     public MetaTableLocator getMetaTableLocator() {
       return null;
     }
-
   }
 
   static Stoppable stopper = new Stoppable() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 5f56d30..be0f80c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -35,7 +35,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -216,7 +217,7 @@ public class TestHFileCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index dc63528..c352a47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -34,7 +34,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -152,7 +153,7 @@ public class TestHFileLinkCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index c460518..c324bc7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -26,9 +26,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -158,7 +163,7 @@ public class TestLogsCleaner {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -188,6 +193,4 @@ public class TestLogsCleaner {
       return false;
     }
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
index d0ebf8a..8f33de8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
@@ -56,6 +56,7 @@ public class TestHRegionOnCluster {
   public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
     final int NUM_MASTERS = 1;
     final int NUM_RS = 3;
+    Admin hbaseAdmin = null;
     TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
 
     try {
@@ -67,7 +68,7 @@ public class TestHRegionOnCluster {
       // Create table
       HTableDescriptor desc = new HTableDescriptor(TABLENAME);
       desc.addFamily(new HColumnDescriptor(FAMILY));
-      Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
+      hbaseAdmin = master.getConnection().getAdmin();
       hbaseAdmin.createTable(desc);
 
       assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
@@ -129,6 +130,7 @@ public class TestHRegionOnCluster {
       putDataAndVerify(table, "r4", FAMILY, "v4", 4);
 
     } finally {
+      if (hbaseAdmin != null) hbaseAdmin.close();
       TEST_UTIL.shutdownMiniCluster();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 871d18e..e079a0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
@@ -314,7 +314,7 @@ public class TestHeapMemoryManager {
   private static class BlockCacheStub implements ResizableBlockCache {
     CacheStats stats = new CacheStats("test");
     long maxSize = 0;
-    
+
     public BlockCacheStub(long size){
       this.maxSize = size;
     }
@@ -476,7 +476,7 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 3352db1..34c9ec1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -177,13 +177,12 @@ public class TestRegionMergeTransactionOnCluster {
       table.close();
 
       List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
-          .getTableRegionsAndLocations(master.getZooKeeper(),
-            master.getShortCircuitConnection(), tableName);
+          .getTableRegionsAndLocations(master.getZooKeeper(), master.getConnection(), tableName);
       HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
       HTableDescriptor tableDescritor = master.getTableDescriptors().get(
           tableName);
       Result mergedRegionResult = MetaTableAccessor.getRegionResult(
-        master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
+        master.getConnection(), mergedRegionInfo.getRegionName());
 
       // contains merge reference in META
       assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -226,7 +225,7 @@ public class TestRegionMergeTransactionOnCluster {
       assertFalse(fs.exists(regionBdir));
 
       mergedRegionResult = MetaTableAccessor.getRegionResult(
-        master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
+        master.getConnection(), mergedRegionInfo.getRegionName());
       assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
           HConstants.MERGEA_QUALIFIER) != null);
       assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -303,7 +302,7 @@ public class TestRegionMergeTransactionOnCluster {
       int regionAnum, int regionBnum) throws Exception {
     List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
         .getTableRegionsAndLocations(master.getZooKeeper(),
-          master.getShortCircuitConnection(), tablename);
+          master.getConnection(), tablename);
     HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
     HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
     TEST_UTIL.getHBaseAdmin().mergeRegions(
@@ -318,8 +317,8 @@ public class TestRegionMergeTransactionOnCluster {
     List<HRegionInfo> tableRegionsInMaster;
     long timeout = System.currentTimeMillis() + waitTime;
     while (System.currentTimeMillis() < timeout) {
-      tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
-        master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
+      tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(master.getZooKeeper(),
+        master.getConnection(), tablename);
       tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
           .getRegionsOfTable(tablename);
       if (tableRegionsInMeta.size() == expectedRegionNum
@@ -329,8 +328,8 @@ public class TestRegionMergeTransactionOnCluster {
       Thread.sleep(250);
     }
 
-    tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
-      master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
+    tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(master.getZooKeeper(),
+      master.getConnection(), tablename);
     LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
     assertEquals(expectedRegionNum, tableRegionsInMeta.size());
   }
@@ -356,15 +355,16 @@ public class TestRegionMergeTransactionOnCluster {
     long timeout = System.currentTimeMillis() + waitTime;
     List<Pair<HRegionInfo, ServerName>> tableRegions;
     while (System.currentTimeMillis() < timeout) {
-      tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
-        master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
+      tableRegions = MetaTableAccessor.getTableRegionsAndLocations(master.getZooKeeper(),
+        master.getConnection(), tablename);
       if (tableRegions.size() == numRegions)
         break;
       Thread.sleep(250);
     }
 
     tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
-      master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
+      master.getZooKeeper(),
+      master.getConnection(), tablename);
     LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
     assertEquals(numRegions, tableRegions.size());
     return table;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index b58fb0f..a3f7e14 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -284,7 +284,7 @@ public class TestRegionServerNoMaster {
     try {
       // we re-opened meta so some of its data is lost
       ServerName sn = getRS().getServerName();
-      MetaTableAccessor.updateRegionLocation(getRS().getShortCircuitConnection(),
+      MetaTableAccessor.updateRegionLocation(getRS().getConnection(),
         hri, sn, getRS().getRegion(regionName).getOpenSeqNum());
       // fake region to be closing now, need to clear state afterwards
       getRS().regionsInTransitionInRS.put(hri.getEncodedNameAsBytes(), Boolean.FALSE);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 5caa544..704d7f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -18,10 +18,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertThat;
-import static org.hamcrest.CoreMatchers.*;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -42,7 +43,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
@@ -127,7 +128,7 @@ public class TestSplitLogWorker {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -135,7 +136,6 @@ public class TestSplitLogWorker {
     public MetaTableLocator getMetaTableLocator() {
       return null;
     }
-
   }
 
   private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems)

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index dc1691f..e9212ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -331,33 +331,33 @@ public class TestSplitTransactionOnCluster {
     byte[] cf = Bytes.toBytes("cf");
     htd.addFamily(new HColumnDescriptor(cf));
     admin.createTable(htd);
-    
+
     for (int i = 0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) {
       Thread.sleep(100);
     }
     assertEquals(1, cluster.getRegions(tableName).size());
-    
+
     HRegion region = cluster.getRegions(tableName).get(0);
     Store store = region.getStore(cf);
     int regionServerIndex = cluster.getServerWith(region.getRegionName());
     HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
-    
+
     Table t  = new HTable(conf, tableName);
     // insert data
     insertData(tableName, admin, t);
     insertData(tableName, admin, t);
-    
+
     int fileNum = store.getStorefiles().size();
     // 0, Compaction Request
     store.triggerMajorCompaction();
     CompactionContext cc = store.requestCompaction();
     assertNotNull(cc);
-    // 1, A timeout split 
-    // 1.1 close region 
+    // 1, A timeout split
+    // 1.1 close region
     assertEquals(2, region.close(false).get(cf).size());
     // 1.2 rollback and Region initialize again
     region.initialize();
-    
+
     // 2, Run Compaction cc
     assertFalse(region.compact(cc, store));
     assertTrue(fileNum > store.getStorefiles().size());
@@ -368,7 +368,7 @@ public class TestSplitTransactionOnCluster {
     st.execute(regionServer, regionServer);
     assertEquals(2, cluster.getRegions(tableName).size());
   }
-  
+
   public static class FailingSplitRegionObserver extends BaseRegionObserver {
     static volatile CountDownLatch latch = new CountDownLatch(1);
     @Override
@@ -900,7 +900,7 @@ public class TestSplitTransactionOnCluster {
       admin.setBalancerRunning(false, true);
       // Turn off the meta scanner so it don't remove parent on us.
       cluster.getMaster().setCatalogJanitorEnabled(false);
-      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
         tableName);
       assertEquals("The specified table should present.", true, tableExists);
       final HRegion region = findSplittableRegion(regions);
@@ -912,15 +912,10 @@ public class TestSplitTransactionOnCluster {
       } catch (IOException e) {
 
       }
-      tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
         tableName);
       assertEquals("The specified table should present.", true, tableExists);
     } finally {
-      if (regions != null) {
-        String node = ZKAssign.getNodeName(zkw, regions.get(0).getRegionInfo()
-            .getEncodedName());
-        ZKUtil.deleteNodeFailSilent(zkw, node);
-      }
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
       t.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index ddd9bcb..58493c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -32,11 +32,18 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -265,115 +272,127 @@ public class TestPerTableCFReplication {
   @Test(timeout=300000)
   public void testPerTableCFReplication() throws Exception {
     LOG.info("testPerTableCFReplication");
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-
-    new HBaseAdmin(conf1).createTable(tabA);
-    new HBaseAdmin(conf1).createTable(tabB);
-    new HBaseAdmin(conf1).createTable(tabC);
-    new HBaseAdmin(conf2).createTable(tabA);
-    new HBaseAdmin(conf2).createTable(tabB);
-    new HBaseAdmin(conf2).createTable(tabC);
-    new HBaseAdmin(conf3).createTable(tabA);
-    new HBaseAdmin(conf3).createTable(tabB);
-    new HBaseAdmin(conf3).createTable(tabC);
-
-    Table htab1A = new HTable(conf1, tabAName);
-    Table htab2A = new HTable(conf2, tabAName);
-    Table htab3A = new HTable(conf3, tabAName);
-
-    Table htab1B = new HTable(conf1, tabBName);
-    Table htab2B = new HTable(conf2, tabBName);
-    Table htab3B = new HTable(conf3, tabBName);
-
-    Table htab1C = new HTable(conf1, tabCName);
-    Table htab2C = new HTable(conf2, tabCName);
-    Table htab3C = new HTable(conf3, tabCName);
-
-    // A. add cluster2/cluster3 as peers to cluster1
-    admin1.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
-    admin1.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
-
-    // A1. tableA can only replicated to cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f1Name, htab2A);
-    deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f2Name, htab2A);
-    deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f3Name, htab2A);
-    deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-
-    // A2. cf 'f1' of tableB can replicated to both cluster2 and cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-    deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-
-    //  cf 'f2' of tableB can only replicated to cluster3
-    putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row1, f2Name, htab2B);
-    deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-
-    //  cf 'f3' of tableB can only replicated to cluster2
-    putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-    ensureRowNotReplicated(row1, f3Name, htab3B);
-    deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-
-    // A3. tableC can only replicated to cluster2
-    putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f1Name, htab3C);
-    deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f2Name, htab3C);
-    deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f3Name, htab3C);
-    deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-
-    // B. change peers' replicable table-cf config
-    admin1.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
-    admin1.setPeerTableCFs("3", "TB; TC:f3");
-
-    // B1. cf 'f1' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f1Name, htab3A);
-    deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    //     cf 'f2' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f2Name, htab3A);
-    deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    //     cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1A);
-    ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
-    deleteAndWaitWithFamily(row2, f3Name, htab1A);
-
-    // B2. tableB can only replicated to cluster3
-    putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f1Name, htab2B);
-    deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f2Name, htab2B);
-    deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f3Name, htab2B);
-    deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-
-    // B3. cf 'f1' of tableC non-replicable to either cluster
-    putAndWaitWithFamily(row2, f1Name, htab1C);
-    ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f1Name, htab1C);
-    //     cf 'f2' of tableC can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row2, f2Name, htab3C);
-    deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    //     cf 'f3' of tableC can replicated to cluster2 and cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
+    Connection connection1 = ConnectionFactory.createConnection(conf1);
+    Connection connection2 = ConnectionFactory.createConnection(conf2);
+    Connection connection3 = ConnectionFactory.createConnection(conf3);
+    try {
+      Admin admin1 = connection1.getAdmin();
+      Admin admin2 = connection2.getAdmin();
+      Admin admin3 = connection3.getAdmin();
+
+      admin1.createTable(tabA);
+      admin1.createTable(tabB);
+      admin1.createTable(tabC);
+      admin2.createTable(tabA);
+      admin2.createTable(tabB);
+      admin2.createTable(tabC);
+      admin3.createTable(tabA);
+      admin3.createTable(tabB);
+      admin3.createTable(tabC);
+
+      Table htab1A = connection1.getTable(tabAName);
+      Table htab2A = connection2.getTable(tabAName);
+      Table htab3A = connection3.getTable(tabAName);
+
+      Table htab1B = connection1.getTable(tabBName);
+      Table htab2B = connection2.getTable(tabBName);
+      Table htab3B = connection3.getTable(tabBName);
+
+      Table htab1C = connection1.getTable(tabCName);
+      Table htab2C = connection2.getTable(tabCName);
+      Table htab3C = connection3.getTable(tabCName);
+
+      // A. add cluster2/cluster3 as peers to cluster1
+      replicationAdmin.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
+      replicationAdmin.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
+
+      // A1. tableA can only replicated to cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f1Name, htab2A);
+      deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f2Name, htab2A);
+      deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f3Name, htab2A);
+      deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+
+      // A2. cf 'f1' of tableB can replicated to both cluster2 and cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+      deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+
+      //  cf 'f2' of tableB can only replicated to cluster3
+      putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row1, f2Name, htab2B);
+      deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+
+      //  cf 'f3' of tableB can only replicated to cluster2
+      putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+      ensureRowNotReplicated(row1, f3Name, htab3B);
+      deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+
+      // A3. tableC can only replicated to cluster2
+      putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f1Name, htab3C);
+      deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f2Name, htab3C);
+      deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f3Name, htab3C);
+      deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+
+      // B. change peers' replicable table-cf config
+      replicationAdmin.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
+      replicationAdmin.setPeerTableCFs("3", "TB; TC:f3");
+
+      // B1. cf 'f1' of tableA can only replicated to cluster2
+      putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f1Name, htab3A);
+      deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      //     cf 'f2' of tableA can only replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f2Name, htab3A);
+      deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      //     cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1A);
+      ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
+      deleteAndWaitWithFamily(row2, f3Name, htab1A);
+
+      // B2. tableB can only replicated to cluster3
+      putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f1Name, htab2B);
+      deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f2Name, htab2B);
+      deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f3Name, htab2B);
+      deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+
+      // B3. cf 'f1' of tableC non-replicable to either cluster
+      putAndWaitWithFamily(row2, f1Name, htab1C);
+      ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f1Name, htab1C);
+      //     cf 'f2' of tableC can only replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row2, f2Name, htab3C);
+      deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      //     cf 'f3' of tableC can replicated to cluster2 and cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    } finally {
+      connection1.close();
+      connection2.close();
+      connection3.close();
+    }
  }
 
   private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index f0412e1..29cc8ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hbase.replication;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
@@ -30,7 +33,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -38,12 +42,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(MediumTests.class)
@@ -149,7 +150,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -185,4 +186,3 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     }
   }
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 55de565..29a9548 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hbase.replication;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -26,25 +30,22 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Ignore;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
@@ -181,7 +182,7 @@ public class TestReplicationTrackerZKImpl {
     int exists = 0;
     int hyphen = 0;
     rp.addPeer("6", new ReplicationPeerConfig().setClusterKey(utility.getClusterKey()), null);
-    
+
     try{
       rp.addPeer("6", new ReplicationPeerConfig().setClusterKey(utility.getClusterKey()), null);
     }catch(IllegalArgumentException e){
@@ -195,11 +196,11 @@ public class TestReplicationTrackerZKImpl {
     }
     assertEquals(1, exists);
     assertEquals(1, hyphen);
-    
+
     // clean up
     rp.removePeer("6");
   }
-  
+
   private class DummyReplicationListener implements ReplicationListener {
 
     @Override
@@ -250,7 +251,7 @@ public class TestReplicationTrackerZKImpl {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -286,4 +287,3 @@ public class TestReplicationTrackerZKImpl {
     }
   }
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index a85878c..5e1e75c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -51,6 +51,8 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -115,7 +117,7 @@ public class TestReplicationSourceManager {
   private static Path oldLogDir;
 
   private static Path logDir;
-  
+
   private static CountDownLatch latch;
 
   private static List<String> files = new ArrayList<String>();
@@ -153,8 +155,6 @@ public class TestReplicationSourceManager {
         HConstants.HREGION_LOGDIR_NAME);
     replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
     manager = replication.getReplicationManager();
-    
-    logName = HConstants.HREGION_LOGDIR_NAME;
 
     manager.addSource(slaveId);
 
@@ -239,7 +239,7 @@ public class TestReplicationSourceManager {
 
     // TODO Need a case with only 2 HLogs and we only want to delete the first one
   }
-  
+
   @Test
   public void testClaimQueues() throws Exception {
     LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
@@ -282,7 +282,7 @@ public class TestReplicationSourceManager {
     assertEquals(1, populatedMap);
     server.abort("", null);
   }
-  
+
   @Test
   public void testCleanupFailoverQueues() throws Exception {
     final Server server = new DummyServer("hostname1.example.org");
@@ -362,8 +362,8 @@ public class TestReplicationSourceManager {
 
     server.abort("", null);
   }
-  
-  
+
+
   static class DummyNodeFailoverWorker extends Thread {
     private SortedMap<String, SortedSet<String>> logZnodesMap;
     Server server;
@@ -412,7 +412,7 @@ public class TestReplicationSourceManager {
       return 0;
     }
   }
-  
+
   static class DummyServer implements Server {
     String hostname;
 
@@ -439,7 +439,7 @@ public class TestReplicationSourceManager {
       return null;
     }
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -473,6 +473,4 @@ public class TestReplicationSourceManager {
       return false; // To change body of implemented methods use File | Settings | File Templates.
     }
   }
-
 }
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index cf1c7bc..724fb47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -25,8 +25,6 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,6 +40,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LargeTests;
@@ -60,6 +59,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -432,20 +432,19 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testMove() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-    final ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), server, server);
+          hri, server, server);
         return null;
       }
     };
@@ -456,20 +455,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testAssign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -480,20 +476,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testUnassign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), false);
+        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null), hri, false);
         return null;
       }
     };
@@ -504,20 +497,17 @@ public class TestAccessController extends SecureTestUtil {
 
   @Test
   public void testRegionOffline() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+      TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -918,14 +908,12 @@ public class TestAccessController extends SecureTestUtil {
       //set global read so RegionServer can move it
       setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx"));
 
-      HTable table = new HTable(conf, tableName);
-      try {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
-        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-        loader.doBulkLoad(loadPath, table);
-      } finally {
-        table.close();
+      try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(tableName)) {
+        try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
+          TEST_UTIL.waitTableEnabled(admin, tableName.getName());
+          LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+          loader.doBulkLoad(loadPath, table);
+        }
       }
     }
 
@@ -1922,18 +1910,19 @@ public class TestAccessController extends SecureTestUtil {
     final HRegionServer newRs = newRsThread.getRegionServer();
 
     // Move region to the new RegionServer.
-    final HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE2);
-    try {
-      NavigableMap<HRegionInfo, ServerName> regions = table
-          .getRegionLocations();
-      final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet()
-          .iterator().next();
-
+    List<HRegionLocation> regions;
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE2)) {
+      regions = locator.getAllRegionLocations();
+    }
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
+    try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(TEST_TABLE2)) {
       AccessTestAction moveAction = new AccessTestAction() {
         @Override
         public Object run() throws Exception {
-          admin.move(firstRegion.getKey().getEncodedNameAsBytes(),
-              Bytes.toBytes(newRs.getServerName().getServerName()));
+          admin.move(hri.getEncodedNameAsBytes(),
+            Bytes.toBytes(newRs.getServerName().getServerName()));
           return null;
         }
       };
@@ -1965,8 +1954,6 @@ public class TestAccessController extends SecureTestUtil {
         }
       };
       USER_ADMIN.runAs(putAction);
-    } finally {
-      table.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index acf000b2..8ffc5a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hbase.security.access;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.util.List;
 
@@ -27,7 +29,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -76,7 +79,7 @@ public class TestAccessController2 extends SecureTestUtil {
   @Test
   public void testCreateWithCorrectOwner() throws Exception {
     // Create a test user
-    User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
+    final User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
       new String[0]);
     // Grant the test user the ability to create tables
     SecureTestUtil.grantGlobal(TEST_UTIL, testUser.getShortName(), Action.CREATE);
@@ -85,11 +88,11 @@ public class TestAccessController2 extends SecureTestUtil {
       public Object run() throws Exception {
         HTableDescriptor desc = new HTableDescriptor(TEST_TABLE.getTableName());
         desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
-        Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-        try {
-          admin.createTable(desc);
-        } finally {
-          admin.close();
+        try (Connection connection =
+            ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), testUser)) {
+          try (Admin admin = connection.getAdmin()) {
+            admin.createTable(desc);
+          }
         }
         return null;
       }
@@ -97,7 +100,8 @@ public class TestAccessController2 extends SecureTestUtil {
     TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName());
     // Verify that owner permissions have been granted to the test user on the
     // table just created
-    List<TablePermission> perms = AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
+    List<TablePermission> perms =
+      AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
        .get(testUser.getShortName());
     assertNotNull(perms);
     assertFalse(perms.isEmpty());

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 1773027..970ab48 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hbase.security.token;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@@ -145,7 +145,7 @@ public class TestTokenAuthentication {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
 
@@ -316,7 +316,6 @@ public class TestTokenAuthentication {
     }
   }
 
-
   private static HBaseTestingUtility TEST_UTIL;
   private static TokenServer server;
   private static Thread serverThread;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index ca83eb2..018a417 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
@@ -102,7 +102,7 @@ public class MockServer implements Server {
   }
 
   @Override
-  public HConnection getShortCircuitConnection() {
+  public ClusterConnection getConnection() {
     return null;
   }
 
@@ -121,4 +121,4 @@ public class MockServer implements Server {
     // TODO Auto-generated method stub
     return this.aborted;
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index e7f5898..0992287 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -224,14 +224,9 @@ public class TestHBaseFsck {
   @Test(timeout=180000)
   public void testFixAssignmentsWhenMETAinTransition() throws Exception {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    Admin admin = null;
-    try {
-      admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
-      admin.closeRegion(cluster.getServerHoldingMeta(),
-          HRegionInfo.FIRST_META_REGIONINFO);
-    } finally {
-      if (admin != null) {
-        admin.close();
+    try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
+      try (Admin admin = connection.getAdmin()) {
+        admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO);
       }
     }
     regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
@@ -382,8 +377,7 @@ public class TestHBaseFsck {
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
     TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS);
-    tbl = new HTable(TEST_UTIL.getConfiguration(), tablename, executorService);
-
+    tbl = (HTable)TEST_UTIL.getConnection().getTable(tablename, executorService);
     List<Put> puts = new ArrayList<Put>();
     for (byte[] row : ROWKEYS) {
       Put p = new Put(row);
@@ -729,8 +723,7 @@ public class TestHBaseFsck {
    */
   @Test
   public void testDegenerateRegions() throws Exception {
-    TableName table =
-        TableName.valueOf("tableDegenerateRegions");
+    TableName table = TableName.valueOf("tableDegenerateRegions");
     try {
       setupTable(table);
       assertNoErrors(doFsck(conf,false));

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0cdaf84/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index 89e5f43..7ac4494 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -41,9 +41,10 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -80,13 +81,14 @@ public class OfflineMetaRebuildTestCore {
   private final static byte[] FAM = Bytes.toBytes("fam");
 
   // for the instance, reset every test run
-  protected HTable htbl;
+  protected Table htbl;
   protected final static byte[][] splits = new byte[][] { Bytes.toBytes("A"),
       Bytes.toBytes("B"), Bytes.toBytes("C") };
 
   private final static String TABLE_BASE = "tableMetaRebuild";
   private static int tableIdx = 0;
   protected TableName table = TableName.valueOf("tableMetaRebuild");
+  protected Connection connection;
 
   @Before
   public void setUpBefore() throws Exception {
@@ -94,6 +96,7 @@ public class OfflineMetaRebuildTestCore {
     TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
     TEST_UTIL.startMiniCluster(3);
     conf = TEST_UTIL.getConfiguration();
+    this.connection = ConnectionFactory.createConnection(conf);
     assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);
 
     // setup the table
@@ -111,8 +114,12 @@ public class OfflineMetaRebuildTestCore {
 
   @After
   public void tearDownAfter() throws Exception {
+    if (this.htbl != null) {
+      this.htbl.close();
+      this.htbl = null;
+    }
+    this.connection.close();
     TEST_UTIL.shutdownMiniCluster();
-    HConnectionManager.deleteConnection(conf);
   }
 
   /**
@@ -122,12 +129,12 @@ public class OfflineMetaRebuildTestCore {
    * @throws InterruptedException
    * @throws KeeperException
    */
-  private HTable setupTable(TableName tablename) throws Exception {
+  private Table setupTable(TableName tablename) throws Exception {
     HTableDescriptor desc = new HTableDescriptor(tablename);
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
     TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
-    return new HTable(TEST_UTIL.getConfiguration(), tablename);
+    return this.connection.getTable(tablename);
   }
 
   private void dumpMeta(HTableDescriptor htd) throws IOException {
@@ -166,14 +173,14 @@ public class OfflineMetaRebuildTestCore {
     }
   }
 
-  protected void deleteRegion(Configuration conf, final HTable tbl,
+  protected void deleteRegion(Configuration conf, final Table tbl,
       byte[] startKey, byte[] endKey) throws IOException {
 
     LOG.info("Before delete:");
     HTableDescriptor htd = tbl.getTableDescriptor();
     dumpMeta(htd);
 
-    Map<HRegionInfo, ServerName> hris = tbl.getRegionLocations();
+    Map<HRegionInfo, ServerName> hris = ((HTable)tbl).getRegionLocations();
     for (Entry<HRegionInfo, ServerName> e : hris.entrySet()) {
       HRegionInfo hri = e.getKey();
       ServerName hsa = e.getValue();
@@ -191,10 +198,10 @@ public class OfflineMetaRebuildTestCore {
             hri.getEncodedName());
         fs.delete(p, true);
 
-        Table meta = new HTable(conf, TableName.META_TABLE_NAME);
-        Delete delete = new Delete(deleteRow);
-        meta.delete(delete);
-        meta.close();
+        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
+          Delete delete = new Delete(deleteRow);
+          meta.delete(delete);
+        }
       }
       LOG.info(hri.toString() + hsa.toString());
     }
@@ -288,4 +295,14 @@ public class OfflineMetaRebuildTestCore {
     meta.close();
     return count;
   }
+
+  protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
+    HTableDescriptor[] htbls = null;
+    try (Connection connection = ConnectionFactory.createConnection(configuration)) {
+      try (Admin admin = connection.getAdmin()) {
+        htbls = admin.listTables();
+      }
+    }
+    return htbls;
+  }
 }


Mime
View raw message