hbase-commits mailing list archives

From st...@apache.org
Subject [5/6] hbase git commit: HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)
Date Tue, 25 Nov 2014 16:20:03 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index 5ca5231..2818c24 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -26,7 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@@ -73,40 +74,39 @@ public class VisibilityClient {
    */
   public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels)
       throws Throwable {
-    Table ht = null;
-    try {
-      ht = new HTable(conf, LABELS_TABLE_NAME);
-      Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = 
-          new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
-        ServerRpcController controller = new ServerRpcController();
-        BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = 
-            new BlockingRpcCallback<VisibilityLabelsResponse>();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time. This class is only used by tests and the shell at the moment, though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
+        Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = 
+            new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
+          ServerRpcController controller = new ServerRpcController();
+          BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = 
+              new BlockingRpcCallback<VisibilityLabelsResponse>();
 
-        public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
-          VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
-          for (String label : labels) {
-            if (label.length() > 0) {
-              VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder();
-              newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label)));
-              builder.addVisLabel(newBuilder.build());
+          public VisibilityLabelsResponse call(VisibilityLabelsService service)
+          throws IOException {
+            VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
+            for (String label : labels) {
+              if (label.length() > 0) {
+                VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder();
+                newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label)));
+                builder.addVisLabel(newBuilder.build());
+              }
             }
+            service.addLabels(controller, builder.build(), rpcCallback);
+            VisibilityLabelsResponse response = rpcCallback.get();
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
+            }
+            return response;
           }
-          service.addLabels(controller, builder.build(), rpcCallback);
-          VisibilityLabelsResponse response = rpcCallback.get();
-          if (controller.failedOnException()) {
-            throw controller.getFailedOn();
-          }
-          return response;
-        }
-      };
-      Map<byte[], VisibilityLabelsResponse> result = ht.coprocessorService(
-          VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
-          callable);
-      return result.values().iterator().next(); // There will be exactly one region for labels
-                                                // table and so one entry in result Map.
-    } finally {
-      if (ht != null) {
-        ht.close();
+        };
+        Map<byte[], VisibilityLabelsResponse> result =
+          table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY,
+            HConstants.EMPTY_BYTE_ARRAY, callable);
+        return result.values().iterator().next(); // There will be exactly one region for labels
+        // table and so one entry in result Map.
       }
     }
   }
@@ -131,33 +131,32 @@ public class VisibilityClient {
    * @throws Throwable
    */
   public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable {
-    Table ht = null;
-    try {
-      ht = new HTable(conf, LABELS_TABLE_NAME);
-      Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable = 
-          new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
-        ServerRpcController controller = new ServerRpcController();
-        BlockingRpcCallback<GetAuthsResponse> rpcCallback = 
-            new BlockingRpcCallback<GetAuthsResponse>();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time. This class is only used by tests and the shell at the moment, though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
+        Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable = 
+            new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
+          ServerRpcController controller = new ServerRpcController();
+          BlockingRpcCallback<GetAuthsResponse> rpcCallback = 
+              new BlockingRpcCallback<GetAuthsResponse>();
 
-        public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
-          GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
-          getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
-          service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
-          GetAuthsResponse response = rpcCallback.get();
-          if (controller.failedOnException()) {
-            throw controller.getFailedOn();
+          public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
+            GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
+            getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
+            service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
+            GetAuthsResponse response = rpcCallback.get();
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
+            }
+            return response;
           }
-          return response;
-        }
-      };
-      Map<byte[], GetAuthsResponse> result = ht.coprocessorService(VisibilityLabelsService.class,
-          HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
-      return result.values().iterator().next(); // There will be exactly one region for labels
-                                                // table and so one entry in result Map.
-    } finally {
-      if (ht != null) {
-        ht.close();
+        };
+        Map<byte[], GetAuthsResponse> result =
+          table.coprocessorService(VisibilityLabelsService.class,
+            HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
+        return result.values().iterator().next(); // There will be exactly one region for labels
+        // table and so one entry in result Map.
       }
     }
   }
@@ -177,44 +176,42 @@ public class VisibilityClient {
 
   private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths,
       final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable {
-    Table ht = null;
-    try {
-      ht = new HTable(conf, LABELS_TABLE_NAME);
-      Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = 
-          new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
-        ServerRpcController controller = new ServerRpcController();
-        BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = 
-            new BlockingRpcCallback<VisibilityLabelsResponse>();
+    // TODO: Make it so the caller passes in a Connection rather than have us do this expensive
+    // setup each time. This class is only used by tests and the shell at the moment, though.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
+        Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = 
+            new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
+          ServerRpcController controller = new ServerRpcController();
+          BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = 
+              new BlockingRpcCallback<VisibilityLabelsResponse>();
 
-        public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
-          SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder();
-          setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
-          for (String auth : auths) {
-            if (auth.length() > 0) {
-              setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth)));
+          public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
+            SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder();
+            setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
+            for (String auth : auths) {
+              if (auth.length() > 0) {
+                setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth)));
+              }
             }
+            if (setOrClear) {
+              service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback);
+            } else {
+              service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback);
+            }
+            VisibilityLabelsResponse response = rpcCallback.get();
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
+            }
+            return response;
           }
-          if (setOrClear) {
-            service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback);
-          } else {
-            service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback);
-          }
-          VisibilityLabelsResponse response = rpcCallback.get();
-          if (controller.failedOnException()) {
-            throw controller.getFailedOn();
-          }
-          return response;
-        }
-      };
-      Map<byte[], VisibilityLabelsResponse> result = ht.coprocessorService(
-          VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
-          callable);
-      return result.values().iterator().next(); // There will be exactly one region for labels
-                                                // table and so one entry in result Map.
-    } finally {
-      if (ht != null) {
-        ht.close();
+        };
+        Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(
+            VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
+            callable);
+        return result.values().iterator().next(); // There will be exactly one region for labels
+        // table and so one entry in result Map.
       }
     }
   }
-}
+}
\ No newline at end of file
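The pattern this patch adopts throughout is: obtain a Connection from ConnectionFactory, get the Table from the Connection, and scope both with try-with-resources. A minimal sketch of the caller-side pattern (the configuration and table name here are assumed):

    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("labels"))) {
      // Work with the table; both resources are closed automatically,
      // table before connection, even if an exception is thrown.
    }

Unlike the replaced new HTable(conf, tableName) constructor, this keeps the connection lifecycle explicit and lets one Connection be shared across many Tables.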

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index d7fcb3d..f1fd7d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -363,12 +363,10 @@ public class LocalHBaseCluster {
    * @return Name of master that just went down.
    */
   public String waitOnMaster(int serverNumber) {
-    JVMClusterUtil.MasterThread masterThread =
-      this.masterThreads.remove(serverNumber);
+    JVMClusterUtil.MasterThread masterThread = this.masterThreads.remove(serverNumber);
     while (masterThread.isAlive()) {
       try {
-        LOG.info("Waiting on " +
-          masterThread.getMaster().getServerName().toString());
+        LOG.info("Waiting on " + masterThread.getMaster().getServerName().toString());
         masterThread.join();
       } catch (InterruptedException e) {
         e.printStackTrace();

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index aca6b27..6b79f80 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -18,9 +18,9 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
@@ -41,16 +41,12 @@ public interface Server extends Abortable, Stoppable {
   ZooKeeperWatcher getZooKeeper();
 
   /**
-   * Returns reference to wrapped short-circuit (i.e. local, bypassing RPC layer entirely)
-   * HConnection to this server, which may be used for miscellaneous needs.
+   * Returns a reference to the server's cluster connection.
    *
-   * Important note: this method returns reference to connection which is managed
+   * Important note: this method returns a reference to a Connection that is managed
    * by Server itself, so callers must NOT attempt to close connection obtained.
-   *
-   * See {@link org.apache.hadoop.hbase.client.ConnectionUtils#createShortCircuitHConnection}
-   * for details on short-circuit connections.
    */
-  HConnection getShortCircuitConnection();
+  ClusterConnection getConnection();
 
   /**
    * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
@@ -69,4 +65,4 @@ public interface Server extends Abortable, Stoppable {
    * Get CoordinatedStateManager instance for this server.
    */
   CoordinatedStateManager getCoordinatedStateManager();
-}
+}
\ No newline at end of file
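Call sites updated later in this patch follow the renamed accessor. Because the Server owns the returned connection, callers use it but never close it; a sketch using calls that appear elsewhere in this diff (tableName assumed):

    ClusterConnection conn = server.getConnection(); // managed by the Server; do NOT close
    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(conn, tableName, true);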

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
index bba7a7d..6f06476 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -36,9 +36,9 @@ public class ZKTableArchiveClient extends Configured {
 
   /** Configuration key for the archive node. */
   private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive";
-  private HConnection connection;
+  private ClusterConnection connection;
 
-  public ZKTableArchiveClient(Configuration conf, HConnection connection) {
+  public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) {
     super(conf);
     this.connection = connection;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index 660733d..eab4a8a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -18,10 +18,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import com.google.protobuf.Descriptors.MethodDescriptor;
-import com.google.protobuf.Message;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -32,11 +34,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.io.MultipleIOException;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
 
 /**
  * A wrapper for HTable. Can be used to restrict privilege.
@@ -55,13 +56,13 @@ import java.util.concurrent.ExecutorService;
 public class HTableWrapper implements HTableInterface {
 
   private TableName tableName;
-  private HTable table;
+  private final Table table;
   private ClusterConnection connection;
   private final List<HTableInterface> openTables;
 
   /**
    * @param openTables External list of tables used for tracking wrappers.
-   * @throws IOException 
+   * @throws IOException
    */
   public static HTableInterface createWrapper(List<HTableInterface> openTables,
       TableName tableName, Environment env, ExecutorService pool) throws IOException {
@@ -73,7 +74,7 @@ public class HTableWrapper implements HTableInterface {
       ClusterConnection connection, ExecutorService pool)
       throws IOException {
     this.tableName = tableName;
-    this.table = new HTable(tableName, connection, pool);
+    this.table = connection.getTable(tableName, pool);
     this.connection = connection;
     this.openTables = openTables;
     this.openTables.add(this);
@@ -82,7 +83,7 @@ public class HTableWrapper implements HTableInterface {
   public void internalClose() throws IOException {
     List<IOException> exceptions = new ArrayList<IOException>(2);
     try {
-    table.close();
+      table.close();
     } catch (IOException e) {
       exceptions.add(e);
     }
@@ -114,7 +115,12 @@ public class HTableWrapper implements HTableInterface {
   @Deprecated
   public Result getRowOrBefore(byte[] row, byte[] family)
       throws IOException {
-    return table.getRowOrBefore(row, family);
+    Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
+    Result startRowResult = null;
+    try (ResultScanner resultScanner = this.table.getScanner(scan)) {
+      startRowResult = resultScanner.next();
+    }
+    return startRowResult;
   }
 
   public Result get(Get get) throws IOException {
@@ -130,8 +136,15 @@ public class HTableWrapper implements HTableInterface {
   }
 
   @Deprecated
-  public Boolean[] exists(List<Get> gets) throws IOException{
-    return table.exists(gets);
+  public Boolean[] exists(List<Get> gets) throws IOException {
+    // Do conversion from the primitive boolean[] returned by existsAll to Boolean[].
+    boolean [] exists = table.existsAll(gets);
+    if (exists == null) return null;
+    Boolean [] results = new Boolean [exists.length];
+    for (int i = 0; i < exists.length; i++) {
+      results[i] = exists[i]? Boolean.TRUE: Boolean.FALSE;
+    }
+    return results;
   }
 
   public void put(Put put) throws IOException {
@@ -254,7 +267,7 @@ public class HTableWrapper implements HTableInterface {
   /**
    * {@inheritDoc}
    * @deprecated If any exception is thrown by one of the actions, there is no way to
-   * retrieve the partially executed results. Use 
+   * retrieve the partially executed results. Use
    * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
    * instead.
    */
@@ -296,12 +309,21 @@ public class HTableWrapper implements HTableInterface {
 
   @Override
   public void setAutoFlush(boolean autoFlush) {
-    table.setAutoFlush(autoFlush, autoFlush);
+    table.setAutoFlushTo(autoFlush);
   }
 
   @Override
   public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
-    table.setAutoFlush(autoFlush, clearBufferOnFail);
+    setAutoFlush(autoFlush);
+    if (!autoFlush && !clearBufferOnFail) {
+      // We don't support this combination. In HTable, the implementation is:
+      //
+      // this.clearBufferOnFail = autoFlush || clearBufferOnFail
+      //
+      // So if autoFlush == false and clearBufferOnFail == false, that combination is not
+      // supported by the new Table interface, so we throw UnsupportedOperationException here.
+      throw new UnsupportedOperationException("Can't do this via wrapper");
+    }
   }
 
   @Override
@@ -322,7 +344,8 @@ public class HTableWrapper implements HTableInterface {
   @Override
   public long incrementColumnValue(byte[] row, byte[] family,
       byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
-    return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
+    return table.incrementColumnValue(row, family, qualifier, amount,
+        writeToWAL? Durability.USE_DEFAULT: Durability.SKIP_WAL);
   }
 
   @Override
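The getRowOrBefore emulation above uses a small reverse scan seeded at the probe row. In isolation the technique looks like this (a sketch; table and row are assumed):

    Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
    try (ResultScanner scanner = table.getScanner(scan)) {
      Result closest = scanner.next(); // the row at or just before 'row', or null if none
    }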

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
index 11acea0..be131e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
@@ -22,11 +22,12 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -47,26 +48,28 @@ import org.apache.hadoop.mapred.Partitioner;
 public class HRegionPartitioner<K2,V2>
 implements Partitioner<ImmutableBytesWritable, V2> {
   private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class);
-  private RegionLocator table;
+  // Connection and locator are not cleaned up; they just die when the partitioner is done.
+  private Connection connection;
+  private RegionLocator locator;
   private byte[][] startKeys;
 
   public void configure(JobConf job) {
     try {
-      this.table = new HTable(HBaseConfiguration.create(job),
-        TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE)));
+      this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
+      TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE));
+      this.locator = this.connection.getRegionLocator(tableName);
     } catch (IOException e) {
       LOG.error(e);
     }
 
     try {
-      this.startKeys = this.table.getStartKeys();
+      this.startKeys = this.locator.getStartKeys();
     } catch (IOException e) {
       LOG.error(e);
     }
   }
 
-  public int getPartition(ImmutableBytesWritable key,
-      V2 value, int numPartitions) {
+  public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) {
     byte[] region = null;
     // Only one region return 0
     if (this.startKeys.length == 1){
@@ -75,7 +78,7 @@ implements Partitioner<ImmutableBytesWritable, V2> {
     try {
       // Not sure if this is cached after a split so we could have problems
       // here if a region splits while mapping
-      region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey();
+      region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
     } catch (IOException e) {
       LOG.error(e);
     }
@@ -92,4 +95,4 @@ implements Partitioner<ImmutableBytesWritable, V2> {
     // if above fails to find start key that match we need to return something
     return 0;
   }
-}
+}
\ No newline at end of file
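Region lookups in both the mapred and mapreduce partitioners now go through RegionLocator rather than HTable. The lookup pattern in isolation (a sketch; conf, tableName and rowKey are assumed):

    Connection connection = ConnectionFactory.createConnection(conf);
    RegionLocator locator = connection.getRegionLocator(tableName);
    byte[][] startKeys = locator.getStartKeys();
    byte[] startKey = locator.getRegionLocation(rowKey).getRegionInfo().getStartKey();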

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 0f03159..1afb9d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@@ -211,7 +212,8 @@ public class TableMapReduceUtil {
         MutationSerialization.class.getName(), ResultSerialization.class.getName());
     if (partitioner == HRegionPartitioner.class) {
       job.setPartitionerClass(HRegionPartitioner.class);
-      int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
+      int regions =
+        MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
       if (job.getNumReduceTasks() > regions) {
         job.setNumReduceTasks(regions);
       }
@@ -275,9 +277,11 @@ public class TableMapReduceUtil {
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
+  // Used by tests.
   public static void limitNumReduceTasks(String table, JobConf job)
   throws IOException {
-    int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
+    int regions =
+      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
     if (job.getNumReduceTasks() > regions)
       job.setNumReduceTasks(regions);
   }
@@ -290,9 +294,11 @@ public class TableMapReduceUtil {
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
+  // Used by tests.
   public static void limitNumMapTasks(String table, JobConf job)
   throws IOException {
-    int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
+    int regions =
+      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
     if (job.getNumMapTasks() > regions)
       job.setNumMapTasks(regions);
   }
@@ -307,7 +313,8 @@ public class TableMapReduceUtil {
    */
   public static void setNumReduceTasks(String table, JobConf job)
   throws IOException {
-    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
+    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job),
+      TableName.valueOf(table)));
   }
 
   /**
@@ -320,7 +327,8 @@ public class TableMapReduceUtil {
    */
   public static void setNumMapTasks(String table, JobConf job)
   throws IOException {
-    job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
+    job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job),
+      TableName.valueOf(table)));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index dab39a8..563b1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -20,23 +20,19 @@ package org.apache.hadoop.hbase.mapred;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.InvalidJobConfException;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.RecordWriter;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.Progressable;
@@ -50,55 +46,51 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
 
   /** JobConf parameter that specifies the output table */
   public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
-  private static final Log LOG = LogFactory.getLog(TableOutputFormat.class);
 
   /**
    * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable)
    * and write to an HBase table.
    */
   protected static class TableRecordWriter implements RecordWriter<ImmutableBytesWritable, Put> {
-    private final Connection conn;
-    private final Table table;
+    private Table m_table;
 
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing. Assumes control over the
      * lifecycle of {@code conn}.
      */
-    public TableRecordWriter(Connection conn, TableName tableName) throws IOException {
-      this.conn = conn;
-      this.table = conn.getTable(tableName);
-      ((HTable) this.table).setAutoFlush(false, true);
+    public TableRecordWriter(final Table table) throws IOException {
+      this.m_table = table;
     }
 
     public void close(Reporter reporter) throws IOException {
-      table.close();
-      conn.close();
+      this.m_table.close();
     }
 
     public void write(ImmutableBytesWritable key, Put value) throws IOException {
-      table.put(new Put(value));
+      m_table.put(new Put(value));
     }
   }
 
   @Override
-  public RecordWriter<ImmutableBytesWritable, Put> getRecordWriter(FileSystem ignored, JobConf job,
-      String name, Progressable progress) throws IOException {
+  public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
+      Progressable progress)
+  throws IOException {
+    // expecting exactly one path
     TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE));
-    Connection conn = null;
-    try {
-      conn = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
-    } catch(IOException e) {
-      LOG.error(e);
-      throw e;
-    }
-    return new TableRecordWriter(conn, tableName);
+    // The connection is not closed; it dies with the JVM. There is no hook for cleanup.
+    Connection connection = ConnectionFactory.createConnection(job);
+    Table table = connection.getTable(tableName);
+    // Clear write buffer on fail is true by default so no need to reset it.
+    table.setAutoFlushTo(false);
+    return new TableRecordWriter(table);
   }
 
   @Override
   public void checkOutputSpecs(FileSystem ignored, JobConf job)
-      throws FileAlreadyExistsException, InvalidJobConfException, IOException {
+  throws FileAlreadyExistsException, InvalidJobConfException, IOException {
     String tableName = job.get(OUTPUT_TABLE);
-    if(tableName == null) {
+    if (tableName == null) {
       throw new IOException("Must specify table name");
     }
   }
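The record writer now relies on client-side write buffering: setAutoFlushTo(false) lets Puts accumulate in the client buffer, and the final flush happens inside table.close() when the writer is closed. A sketch of the handoff, using names from this patch (key, put and reporter assumed):

    Table table = connection.getTable(TableName.valueOf(job.get(OUTPUT_TABLE)));
    table.setAutoFlushTo(false); // buffer Puts client-side
    TableRecordWriter writer = new TableRecordWriter(table);
    writer.write(key, put);      // buffered, not yet sent
    writer.close(reporter);      // table.close() flushes the remaining buffered Puts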

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
index 150bb25..deb59c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
@@ -28,11 +28,12 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -66,44 +67,55 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression
   @Override
   public void init() {
     // Reading all the labels and ordinal.
-    // This scan should be done by user with global_admin previliges.. Ensure that it works
+    // This scan should be done by a user with global_admin privileges. Ensure that it works.
     Table labelsTable = null;
+    Connection connection = null;
     try {
-      labelsTable = new HTable(conf, LABELS_TABLE_NAME);
-    } catch (TableNotFoundException e) {
-      // Just return with out doing any thing. When the VC is not used we wont be having 'labels'
-      // table in the cluster.
-      return;
-    } catch (IOException e) {
-      LOG.error("Error opening 'labels' table", e);
-      return;
-    }
-    Scan scan = new Scan();
-    scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL));
-    scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
-    ResultScanner scanner = null;
-    try {
-      scanner = labelsTable.getScanner(scan);
-      Result next = null;
-      while ((next = scanner.next()) != null) {
-        byte[] row = next.getRow();
-        byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
-        labels.put(Bytes.toString(value), Bytes.toInt(row));
+      connection = ConnectionFactory.createConnection(conf);
+      try {
+        labelsTable = connection.getTable(LABELS_TABLE_NAME);
+      } catch (TableNotFoundException e) {
+        // Just return without doing anything. When the VC is not used, the 'labels' table
+        // won't be present in the cluster.
+        return;
+      } catch (IOException e) {
+        LOG.error("Error opening 'labels' table", e);
+        return;
       }
-    } catch (IOException e) {
-      LOG.error("Error reading 'labels' table", e);
-    } finally {
+      Scan scan = new Scan();
+      scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL));
+      scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
+      ResultScanner scanner = null;
       try {
-        if (scanner != null) {
-          scanner.close();
+        scanner = labelsTable.getScanner(scan);
+        Result next = null;
+        while ((next = scanner.next()) != null) {
+          byte[] row = next.getRow();
+          byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER);
+          labels.put(Bytes.toString(value), Bytes.toInt(row));
         }
+      } catch (IOException e) {
+        LOG.error("Error scanning 'labels' table", e);
       } finally {
+        if (scanner != null) scanner.close();
+      }
+    } catch (IOException ioe) {
+      LOG.error("Failed reading 'labels' tags", ioe);
+      return;
+    } finally {
+      if (labelsTable != null) {
         try {
           labelsTable.close();
-        } catch (IOException e) {
-          LOG.warn("Error on closing 'labels' table", e);
+        } catch (IOException ioe) {
+          LOG.warn("Error closing 'labels' table", ioe);
         }
       }
+      if (connection != null) {
+        try {
+          connection.close();
+        } catch (IOException ioe) {
+          LOG.warn("Failed close of temporary connection", ioe);
+        }
+      }
     }
   }
 
@@ -117,4 +129,4 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression
     };
     return VisibilityUtils.createVisibilityExpTags(visExpression, true, false, null, provider);
   }
-}
+}
\ No newline at end of file
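The nested try/finally in init() preserves the TableNotFoundException special case when opening the table. Setting that aside, the same cleanup could be expressed with try-with-resources, since Connection, Table and ResultScanner are all Closeable (a sketch, not what the patch does):

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table labelsTable = connection.getTable(LABELS_TABLE_NAME);
         ResultScanner scanner = labelsTable.getScanner(scan)) {
      for (Result next; (next = scanner.next()) != null;) {
        labels.put(Bytes.toString(next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER)),
            Bytes.toInt(next.getRow()));
      }
    }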

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
index f88d959..24ca8e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
@@ -28,9 +28,11 @@ import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapred.TableOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Partitioner;
 
@@ -55,7 +57,9 @@ implements Configurable {
 
   private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class);
   private Configuration conf = null;
-  private RegionLocator table;
+  // Connection and locator are not cleaned up; they just die when the partitioner is done.
+  private Connection connection;
+  private RegionLocator locator;
   private byte[][] startKeys;
 
   /**
@@ -82,7 +86,7 @@ implements Configurable {
     try {
       // Not sure if this is cached after a split so we could have problems
       // here if a region splits while mapping
-      region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey();
+      region = this.locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
     } catch (IOException e) {
       LOG.error(e);
     }
@@ -123,14 +127,14 @@ implements Configurable {
   public void setConf(Configuration configuration) {
     this.conf = HBaseConfiguration.create(configuration);
     try {
-      TableName tableName = TableName.valueOf(configuration
-          .get(TableOutputFormat.OUTPUT_TABLE));
-      this.table = new HTable(this.conf, tableName);
+      this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(conf));
+      TableName tableName = TableName.valueOf(conf.get(TableOutputFormat.OUTPUT_TABLE));
+      this.locator = this.connection.getRegionLocator(tableName);
     } catch (IOException e) {
       LOG.error(e);
     }
     try {
-      this.startKeys = this.table.getStartKeys();
+      this.startKeys = this.locator.getStartKeys();
     } catch (IOException e) {
       LOG.error(e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index f586523..b54e3ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -41,9 +41,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -401,86 +404,90 @@ public class ImportTsv extends Configured implements Tool {
    */
   public static Job createSubmittableJob(Configuration conf, String[] args)
       throws IOException, ClassNotFoundException {
+    Job job = null;
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Admin admin = connection.getAdmin()) {
+        // Support non-XML supported characters
+        // by re-encoding the passed separator as a Base64 string.
+        String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
+        if (actualSeparator != null) {
+          conf.set(SEPARATOR_CONF_KEY,
+              Base64.encodeBytes(actualSeparator.getBytes()));
+        }
 
-    HBaseAdmin admin = new HBaseAdmin(conf);
-    // Support non-XML supported characters
-    // by re-encoding the passed separator as a Base64 string.
-    String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
-    if (actualSeparator != null) {
-      conf.set(SEPARATOR_CONF_KEY,
-               Base64.encodeBytes(actualSeparator.getBytes()));
-    }
+        // See if a non-default Mapper was set
+        String mapperClassName = conf.get(MAPPER_CONF_KEY);
+        Class mapperClass =
+          mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER;
+
+        TableName tableName = TableName.valueOf(args[0]);
+        Path inputDir = new Path(args[1]);
+        String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString());
+        job = Job.getInstance(conf, jobName);
+        job.setJarByClass(mapperClass);
+        FileInputFormat.setInputPaths(job, inputDir);
+        job.setInputFormatClass(TextInputFormat.class);
+        job.setMapperClass(mapperClass);
+        String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
+        String columns[] = conf.getStrings(COLUMNS_CONF_KEY);
+        if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) {
+          String fileLoc = conf.get(CREDENTIALS_LOCATION);
+          Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf);
+          job.getCredentials().addAll(cred);
+        }
 
-    // See if a non-default Mapper was set
-    String mapperClassName = conf.get(MAPPER_CONF_KEY);
-    Class mapperClass = mapperClassName != null ?
-        Class.forName(mapperClassName) : DEFAULT_MAPPER;
-
-    TableName tableName = TableName.valueOf(args[0]);
-    Path inputDir = new Path(args[1]);
-    String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString());
-    Job job = Job.getInstance(conf, jobName);
-    job.setJarByClass(mapperClass);
-    FileInputFormat.setInputPaths(job, inputDir);
-    job.setInputFormatClass(TextInputFormat.class);
-    job.setMapperClass(mapperClass);
-    String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
-    String columns[] = conf.getStrings(COLUMNS_CONF_KEY);
-    if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) {
-      String fileLoc = conf.get(CREDENTIALS_LOCATION);
-      Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf);
-      job.getCredentials().addAll(cred);
-    }
+        if (hfileOutPath != null) {
+          if (!admin.tableExists(tableName)) {
+            String errorMsg = format("Table '%s' does not exist.", tableName);
+            if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) {
+              LOG.warn(errorMsg);
+              // TODO: this is backwards. Instead of depending on the existence of a table,
+              // create a sane splits file for HFileOutputFormat based on data sampling.
+              createTable(admin, tableName, columns);
+            } else {
+              LOG.error(errorMsg);
+              throw new TableNotFoundException(errorMsg);
+            }
+          }
+          try (HTable table = (HTable)connection.getTable(tableName)) {
+            job.setReducerClass(PutSortReducer.class);
+            Path outputDir = new Path(hfileOutPath);
+            FileOutputFormat.setOutputPath(job, outputDir);
+            job.setMapOutputKeyClass(ImmutableBytesWritable.class);
+            if (mapperClass.equals(TsvImporterTextMapper.class)) {
+              job.setMapOutputValueClass(Text.class);
+              job.setReducerClass(TextSortReducer.class);
+            } else {
+              job.setMapOutputValueClass(Put.class);
+              job.setCombinerClass(PutCombiner.class);
+            }
+            HFileOutputFormat.configureIncrementalLoad(job, table);
+          }
+        } else {
+          if (!admin.tableExists(tableName)) {
+            String errorMsg = format("Table '%s' does not exist.", tableName);
+            LOG.error(errorMsg);
+            throw new TableNotFoundException(errorMsg);
+          }
+          if (mapperClass.equals(TsvImporterTextMapper.class)) {
+            usage(TsvImporterTextMapper.class.toString()
+                + " should not be used for the non-bulkload case. Use "
+                + TsvImporterMapper.class.toString()
+                + " or a custom mapper whose value type is Put.");
+            System.exit(-1);
+          }
+          // No reducers. Just write straight to table. Call initTableReducerJob
+          // to set up the TableOutputFormat.
+          TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job);
+          job.setNumReduceTasks(0);
+        }
 
-    if (hfileOutPath != null) {
-      if (!admin.tableExists(tableName)) {
-        String errorMsg = format("Table '%s' does not exist.", tableName);
-        if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) {
-          LOG.warn(errorMsg);
-          // TODO: this is backwards. Instead of depending on the existence of a table,
-          // create a sane splits file for HFileOutputFormat based on data sampling.
-          createTable(admin, tableName, columns);
-        } else {
-          LOG.error(errorMsg);
-          throw new TableNotFoundException(errorMsg);
-        }
+        TableMapReduceUtil.addDependencyJars(job);
+        TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
+            com.google.common.base.Function.class /* Guava used by TsvParser */);
       }
-      HTable table = new HTable(conf, tableName);
-      job.setReducerClass(PutSortReducer.class);
-      Path outputDir = new Path(hfileOutPath);
-      FileOutputFormat.setOutputPath(job, outputDir);
-      job.setMapOutputKeyClass(ImmutableBytesWritable.class);
-      if (mapperClass.equals(TsvImporterTextMapper.class)) {
-        job.setMapOutputValueClass(Text.class);
-        job.setReducerClass(TextSortReducer.class);
-      } else {
-        job.setMapOutputValueClass(Put.class);
-        job.setCombinerClass(PutCombiner.class);
-      }
-      HFileOutputFormat.configureIncrementalLoad(job, table);
-    } else {
-      if (!admin.tableExists(tableName)) {
-        String errorMsg = format("Table '%s' does not exist.", tableName);
-        LOG.error(errorMsg);
-        throw new TableNotFoundException(errorMsg);
-      }
-      if (mapperClass.equals(TsvImporterTextMapper.class)) {
-        usage(TsvImporterTextMapper.class.toString()
-            + " should not be used for non bulkloading case. use "
-            + TsvImporterMapper.class.toString()
-            + " or custom mapper whose value type is Put.");
-        System.exit(-1);
-      }
-      // No reducers. Just write straight to table. Call initTableReducerJob
-      // to set up the TableOutputFormat.
-      TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null,
-          job);
-      job.setNumReduceTasks(0);
     }
-
-    TableMapReduceUtil.addDependencyJars(job);
-    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
-        com.google.common.base.Function.class /* Guava used by TsvParser */);
     return job;
   }
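Admin operations in createSubmittableJob are likewise scoped to the shared Connection. The skeleton of the new structure (a sketch; tableName assumed):

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (!admin.tableExists(tableName)) {
        // create the table, or fail, depending on CREATE_TABLE_CONF_KEY
      }
      // ... build and configure the Job ...
    }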
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
index dccaa25..50da9bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
@@ -252,9 +252,10 @@ implements Configurable {
   protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
     if (conf.get(SPLIT_TABLE) != null) {
       TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
-      try (Connection conn = ConnectionFactory.createConnection(getConf());
-          RegionLocator rl = conn.getRegionLocator(splitTableName)) {
-        return rl.getStartEndKeys();
+      try (Connection conn = ConnectionFactory.createConnection(getConf())) {
+        try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
+          return rl.getStartEndKeys();
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index f69be50..ebe7d36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -34,8 +34,6 @@ import java.util.zip.ZipFile;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,10 +41,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.security.User;
@@ -662,7 +661,7 @@ public class TableMapReduceUtil {
     job.setOutputValueClass(Writable.class);
     if (partitioner == HRegionPartitioner.class) {
       job.setPartitionerClass(HRegionPartitioner.class);
-      int regions = MetaTableAccessor.getRegionCount(conf, table);
+      int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
       if (job.getNumReduceTasks() > regions) {
         job.setNumReduceTasks(regions);
       }
@@ -687,7 +686,8 @@ public class TableMapReduceUtil {
    */
   public static void limitNumReduceTasks(String table, Job job)
   throws IOException {
-    int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table);
+    int regions =
+      MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table));
     if (job.getNumReduceTasks() > regions)
       job.setNumReduceTasks(regions);
   }
@@ -702,7 +702,8 @@ public class TableMapReduceUtil {
    */
   public static void setNumReduceTasks(String table, Job job)
   throws IOException {
-    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table));
+    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(),
+       TableName.valueOf(table)));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 84f6891..de29f37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
  * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored
  * while the output value <u>must</u> be either a {@link Put} or a
  * {@link Delete} instance.
- *
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
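The class comment above states the TableOutputFormat contract: the reduce output key is ignored and the value must be a Put or a Delete. A hedged sketch of a reducer honoring that contract; the family/qualifier "f"/"count" and the word-count shape are illustrative assumptions, not part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableReducer;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    public class CountReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {
      @Override
      protected void reduce(Text key, Iterable<IntWritable> values, Context context)
          throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable v : values) {
          sum += v.get();
        }
        Put put = new Put(Bytes.toBytes(key.toString()));
        // Family and qualifier are placeholders.
        put.add(Bytes.toBytes("f"), Bytes.toBytes("count"), Bytes.toBytes(sum));
        // TableOutputFormat ignores the emitted key; null is conventional.
        context.write(null, put);
      }
    }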

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 7f41aa1..36b322f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -339,7 +339,7 @@ public class AssignmentManager {
     if (TableName.META_TABLE_NAME.equals(tableName)) {
       hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper());
     } else {
-      hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName, true);
+      hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true);
     }
 
     Integer pending = 0;
@@ -565,7 +565,7 @@ public class AssignmentManager {
           ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
     }
     FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
-      this.server.getShortCircuitConnection());
+      this.server.getConnection());
   }
 
   /**
@@ -1564,7 +1564,7 @@ public class AssignmentManager {
             TableState.State.ENABLING);
 
     // Region assignment from META
-    List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection());
+    List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
     // Get any new but slow to checkin region server that joined the cluster
     Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
     // Set of offline servers to be returned
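This and most of the server-side hunks that follow are a single mechanical rename: Server#getShortCircuitConnection() becomes Server#getConnection(), and the MetaTableAccessor helpers are handed that shared cluster Connection. The call shape, for reference (a fragment; the server and tableName variables are assumed to be in scope):

    // Reading a table's regions from hbase:meta through the server's shared
    // Connection; the trailing boolean excludes offlined split parents.
    List<HRegionInfo> regions =
        MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true);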

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 886991c..25c405c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -29,18 +29,19 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
@@ -62,6 +63,7 @@ public class CatalogJanitor extends Chore {
   private final MasterServices services;
   private AtomicBoolean enabled = new AtomicBoolean(true);
   private AtomicBoolean alreadyRunning = new AtomicBoolean(false);
+  private final Connection connection;
 
   CatalogJanitor(final Server server, final MasterServices services) {
     super("CatalogJanitor-" + server.getServerName().toShortString(),
@@ -69,6 +71,7 @@ public class CatalogJanitor extends Chore {
       server);
     this.server = server;
     this.services = services;
+    this.connection = server.getConnection();
   }
 
   @Override
@@ -163,7 +166,7 @@ public class CatalogJanitor extends Chore {
 
     // Run full scan of hbase:meta catalog table passing in our custom visitor with
     // the start row
-    MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName);
+    MetaScanner.metaScan(server.getConfiguration(), this.connection, visitor, tableName);
 
     return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
         count.get(), mergedRegions, splitParents);
@@ -198,7 +201,7 @@ public class CatalogJanitor extends Chore {
           + " from fs because merged region no longer holds references");
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
-      MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(),
+      MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
         mergedRegion);
       return true;
     }
@@ -331,7 +334,7 @@ public class CatalogJanitor extends Chore {
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
       if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
-      MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent);
+      MetaTableAccessor.deleteRegion(this.connection, parent);
       result = true;
     }
     return result;
@@ -404,7 +407,7 @@ public class CatalogJanitor extends Chore {
     // Get merge regions if it is a merged region and already has merge
     // qualifier
     Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
-        .getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(),
+        .getRegionsFromMergeQualifier(this.services.getConnection(),
           region.getRegionName());
     if (mergeRegions == null
         || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
@@ -420,4 +423,4 @@ public class CatalogJanitor extends Chore {
     return cleanMergeRegion(region, mergeRegions.getFirst(),
         mergeRegions.getSecond());
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 3437f34..739ac76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -538,12 +538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
     this.serverManager = createServerManager(this, this);
 
-    synchronized (this) {
-      if (shortCircuitConnection == null) {
-        shortCircuitConnection = createShortCircuitConnection();
-        metaTableLocator = new MetaTableLocator();
-      }
-    }
+    setupClusterConnection();
 
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
@@ -721,7 +716,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       metaState.getState(), metaState.getServerName(), null);
 
     if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
-        this.getShortCircuitConnection(), this.getZooKeeper(), timeout)) {
+        this.getConnection(), this.getZooKeeper(), timeout)) {
       ServerName currentMetaServer = metaState.getServerName();
       if (serverManager.isServerOnline(currentMetaServer)) {
         LOG.info("Meta was in transition on " + currentMetaServer);
@@ -1492,6 +1487,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * is found, but not currently deployed, the second element of the pair
    * may be null.
    */
+  @VisibleForTesting // Used by TestMaster.
   Pair<HRegionInfo, ServerName> getTableRegionForRow(
       final TableName tableName, final byte [] rowKey)
   throws IOException {
@@ -1542,7 +1538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     if (isCatalogTable(tableName)) {
       throw new IOException("Can't modify catalog tables");
     }
-    if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) {
+    if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
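Note the first hunk above: the inline synchronized null-check that lazily created the short-circuit connection is factored into a setupClusterConnection() call whose body this diff does not show. Presumably it performs the same guarded one-time initialization; a reconstruction reusing the names from the removed block (an assumption, not the committed code):

    // Assumption: body reconstructed from the removed inline block above;
    // the real implementation lives elsewhere in this commit.
    protected synchronized void setupClusterConnection() throws IOException {
      if (shortCircuitConnection == null) {
        shortCircuitConnection = createShortCircuitConnection();
        metaTableLocator = new MetaTableLocator();
      }
    }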

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 2efcf63..c3012f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1132,7 +1132,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       master.checkInitialized();
       Pair<HRegionInfo, ServerName> pair =
-        MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
+        MetaTableAccessor.getRegion(master.getConnection(), regionName);
       if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
       HRegionInfo hri = pair.getFirst();
       if (master.cpHost != null) {
@@ -1263,7 +1263,7 @@ public class MasterRpcServices extends RSRpcServices
           + " actual: " + type);
       }
       Pair<HRegionInfo, ServerName> pair =
-        MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
+        MetaTableAccessor.getRegion(master.getConnection(), regionName);
       if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
       HRegionInfo hri = pair.getFirst();
       if (master.cpHost != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
index ae4af4a..8f7d0f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
@@ -240,11 +240,11 @@ public class RegionStateStore {
 
   void splitRegion(HRegionInfo p,
       HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
-    MetaTableAccessor.splitRegion(server.getShortCircuitConnection(), p, a, b, sn);
+    MetaTableAccessor.splitRegion(server.getConnection(), p, a, b, sn);
   }
 
   void mergeRegions(HRegionInfo p,
       HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
-    MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), p, a, b, sn);
+    MetaTableAccessor.mergeRegions(server.getConnection(), p, a, b, sn);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index b96aaee..796536a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -934,7 +934,7 @@ public class RegionStates {
 
     try {
       Pair<HRegionInfo, ServerName> p =
-        MetaTableAccessor.getRegion(server.getShortCircuitConnection(), regionName);
+        MetaTableAccessor.getRegion(server.getConnection(), regionName);
       HRegionInfo hri = p == null ? null : p.getFirst();
       if (hri != null) {
         createRegionState(hri);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 7132555..31d3fab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ZKNamespaceManager;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -69,7 +68,7 @@ public class TableNamespaceManager {
 
   private Configuration conf;
   private MasterServices masterServices;
-  private HTable nsTable;
+  private Table nsTable;
   private ZKNamespaceManager zkNamespaceManager;
   private boolean initialized;
 
@@ -82,7 +81,7 @@ public class TableNamespaceManager {
   }
 
   public void start() throws IOException {
-    if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(),
+    if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
         TableName.NAMESPACE_TABLE_NAME)) {
       LOG.info("Namespace table not found. Creating...");
       createNamespaceTable(masterServices);
@@ -253,16 +252,14 @@ public class TableNamespaceManager {
   public synchronized boolean isTableAvailableAndInitialized() throws IOException {
     // Did we already get a table? If so, still make sure it's available
     if (initialized) {
-      if (nsTable.getConnection().isClosed()) {
-        nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
-      }
+      this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
       return true;
     }
 
     // Now check if the table is assigned, if not then fail fast
     if (isTableAssigned() && isTableEnabled()) {
       try {
-        nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
+        nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
         zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
         zkNamespaceManager.start();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 01c1f89..c884806 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -25,21 +25,21 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.RackManager;
@@ -121,12 +121,14 @@ public class FavoredNodeAssignmentHelper {
       }
     }
     // Write the region assignments to the meta table.
-    Table metaTable = null;
-    try {
-      metaTable = new HTable(conf, TableName.META_TABLE_NAME);
-      metaTable.put(puts);
-    } finally {
-      if (metaTable != null) metaTable.close();
+    // TODO: See how the overrides above take a Connection rather than a Configuration; but
+    // that Connection is a short-circuit connection, which is not going to be good in all
+    // cases where master and meta are not colocated. Fix when this favored nodes feature is
+    // actually used someday.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+        metaTable.put(puts);
+      }
     }
     LOG.info("Added " + puts.size() + " regions in META");
   }
@@ -304,7 +306,6 @@ public class FavoredNodeAssignmentHelper {
    * primary/secondary/tertiary RegionServers 
    * @param primaryRSMap
    * @return the map of regions to the servers the region-files should be hosted on
-   * @throws IOException
    */
   public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
       Map<HRegionInfo, ServerName> primaryRSMap) {
@@ -603,4 +604,4 @@ public class FavoredNodeAssignmentHelper {
     }
     return strBuf.toString();
   }
-}
+}
\ No newline at end of file
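The TODO in the hunk above is worth underlining: ConnectionFactory.createConnection() stands up a full cluster connection (ZooKeeper session, meta cache), so paying that cost per call is only tolerable on a rarely exercised path. Where a caller already holds a Connection, the cheap shape is simply the following (a sketch; connection and puts are assumed to be supplied by the caller):

    // Table handles are lightweight and short-lived; the heavyweight
    // Connection is reused rather than recreated.
    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
      metaTable.put(puts);
    }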

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index 694e902..111de62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -75,7 +75,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
     List<RegionPlan> plans = new ArrayList<RegionPlan>();
     //perform a scan of the meta to get the latest updates (if any)
     SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
-        new SnapshotOfRegionAssignmentFromMeta(super.services.getShortCircuitConnection());
+        new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection());
     try {
       snaphotOfRegionAssignment.initialize();
     } catch (IOException ie) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 359315e..adf1004 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -119,7 +119,7 @@ public class CreateTableHandler extends EventHandler {
     boolean success = false;
     try {
       TableName tableName = this.hTableDescriptor.getTableName();
-      if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
+      if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
       success = true;
@@ -289,6 +289,6 @@ public class CreateTableHandler extends EventHandler {
    */
   protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
       throws IOException {
-    MetaTableAccessor.addRegionsToMeta(this.server.getShortCircuitConnection(), regionInfos);
+    MetaTableAccessor.addRegionsToMeta(this.server.getConnection(), regionInfos);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index b36eb95..905f899 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -135,7 +135,7 @@ public class DeleteTableHandler extends TableEventHandler {
     try {
       // 1. Remove regions from META
       LOG.debug("Deleting regions from META");
-      MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);
+      MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);
 
       // -----------------------------------------------------------------------
       // NOTE: At this point we still have data on disk, but nothing in hbase:meta

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index 455a6ce..ee97616 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -80,7 +80,7 @@ public class DisableTableHandler extends EventHandler {
     boolean success = false;
     try {
       // Check if table exists
-      if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
+      if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         throw new TableNotFoundException(tableName);
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 3d48124..280e3e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -91,7 +91,7 @@ public class EnableTableHandler extends EventHandler {
     boolean success = false;
     try {
       // Check if table exists
-      if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
+      if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
         // retainAssignment is true only during recovery.  In normal case it is false
         if (!this.skipTableStateCheck) {
           throw new TableNotFoundException(tableName);
@@ -177,7 +177,7 @@ public class EnableTableHandler extends EventHandler {
         server.getZooKeeper());
     } else {
       tableRegionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
-        server.getShortCircuitConnection(), tableName, true);
+        server.getConnection(), tableName, true);
     }
 
     int countOfRegionsInTable = tableRegionsAndLocations.size();

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
index 73208bc..23e41d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
@@ -148,7 +148,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
       throws InterruptedException, IOException, KeeperException {
     long timeout = this.server.getConfiguration().
         getLong("hbase.catalog.verification.timeout", 1000);
-    if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getShortCircuitConnection(),
+    if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getConnection(),
       this.server.getZooKeeper(), timeout)) {
       this.services.getAssignmentManager().assignMeta();
     } else if (serverName.equals(server.getMetaTableLocator().getMetaRegionLocation(

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
index a778c26..b35de6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
@@ -25,15 +25,14 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 
 @InterfaceAudience.Private
@@ -101,19 +99,14 @@ public class ModifyTableHandler extends TableEventHandler {
     Set<byte[]> tableRows = new HashSet<byte[]>();
     Scan scan = MetaTableAccessor.getScanForTableName(table);
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    Table htable = null;
-    try {
-      htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME);
-      ResultScanner resScanner = htable.getScanner(scan);
+    Connection connection = this.masterServices.getConnection();
+    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+      ResultScanner resScanner = metaTable.getScanner(scan);
       for (Result result : resScanner) {
         tableRows.add(result.getRow());
       }
       MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount,
-          oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection());
-    } finally {
-      if (htable != null) {
-        htable.close();
-      }
+        oldReplicaCount - newReplicaCount, masterServices.getConnection());
     }
   }
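One nuance in the hunk above: the Table is now closed by try-with-resources, but the ResultScanner it hands out is still left to be released implicitly when the table closes. ResultScanner is itself Closeable, so a tidier variant would scope both (a sketch, not the committed code):

    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner resScanner = metaTable.getScanner(scan)) {
      for (Result result : resScanner) {
        tableRows.add(result.getRow());
      }
    }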
 

