hbase-commits mailing list archives

From: st...@apache.org
Subject: hbase git commit: HBASE-13198 Remove HConnectionManager (Mikhail Antonov)
Date: Fri, 20 Mar 2015 16:41:04 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 0d7665441 -> f57dca5e1


HBASE-13198 Remove HConnectionManager (Mikhail Antonov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f57dca5e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f57dca5e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f57dca5e

Branch: refs/heads/master
Commit: f57dca5e1b0b5c8491e2398e2b26717b480612b1
Parents: 0d76654
Author: stack <stack@apache.org>
Authored: Fri Mar 20 09:41:48 2015 -0700
Committer: stack <stack@apache.org>
Committed: Fri Mar 20 09:41:48 2015 -0700

----------------------------------------------------------------------
 bin/region_mover.rb                             |   4 +-
 bin/region_status.rb                            |   4 +-
 conf/log4j.properties                           |   4 +-
 .../hadoop/hbase/client/ConnectionFactory.java  |   1 -
 .../hadoop/hbase/client/ConnectionManager.java  |  46 +--
 .../apache/hadoop/hbase/client/HConnection.java |  23 +-
 .../hadoop/hbase/client/HConnectionManager.java | 324 -------------------
 .../org/apache/hadoop/hbase/client/HTable.java  |   2 +-
 .../apache/hadoop/hbase/client/MultiAction.java |   2 +-
 .../hbase/client/ReversedScannerCallable.java   |   2 +-
 .../hadoop/hbase/client/ScannerCallable.java    |   2 +-
 .../exceptions/ConnectionClosingException.java  |   2 +-
 .../exceptions/PreemptiveFastFailException.java |   2 +-
 .../src/test/resources/log4j.properties         |   2 +-
 .../src/test/resources/log4j.properties         |   2 +-
 ...egrationTestBigLinkedListWithVisibility.java |   3 +-
 hbase-rest/src/test/resources/log4j.properties  |   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon    |   1 -
 .../tmpl/master/RegionServerListTmpl.jamon      |   1 -
 .../apache/hadoop/hbase/LocalHBaseCluster.java  |   4 +-
 .../replication/VerifyReplication.java          |  30 +-
 .../HBaseInterClusterReplicationEndpoint.java   |   4 +-
 .../hadoop/hbase/util/ConnectionCache.java      |  13 +-
 .../org/apache/hadoop/hbase/util/HMerge.java    |  18 +-
 .../hadoop/hbase/util/MultiHConnection.java     |   4 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java    |   5 +-
 .../resources/hbase-webapps/master/snapshot.jsp |   1 -
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   5 +-
 .../hbase/client/HConnectionTestingUtility.java |  16 +-
 .../hadoop/hbase/client/TestFromClientSide.java |  92 +-----
 .../org/apache/hadoop/hbase/client/TestHCM.java |  97 +-----
 .../TestEndToEndSplitTransaction.java           |   4 +-
 .../hadoop/hbase/util/MultiThreadedAction.java  |   4 +-
 .../util/hbck/TestOfflineMetaRebuildBase.java   |   1 -
 .../src/test/resources/log4j.properties         |   2 +-
 35 files changed, 97 insertions(+), 632 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/bin/region_mover.rb
----------------------------------------------------------------------
diff --git a/bin/region_mover.rb b/bin/region_mover.rb
index 565b0d5..cd0f173 100644
--- a/bin/region_mover.rb
+++ b/bin/region_mover.rb
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin
 import org.apache.hadoop.hbase.client.Get
 import org.apache.hadoop.hbase.client.Scan
 import org.apache.hadoop.hbase.client.HTable
-import org.apache.hadoop.hbase.client.HConnectionManager
+import org.apache.hadoop.hbase.client.ConnectionFactory
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -243,7 +243,7 @@ end
 
 # Now get list of regions on targetServer
 def getRegions(config, servername)
-  connection = HConnectionManager::getConnection(config);
+  connection = ConnectionFactory::createConnection(config);
   return ProtobufUtil::getOnlineRegions(connection.getAdmin(ServerName.valueOf(servername)));
 end
 

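For reference, the same migration on the Java client side goes from the cached HConnectionManager.getConnection(conf) to ConnectionFactory.createConnection(conf), where the caller owns the connection and must close it. A minimal sketch, not part of this commit; the class name is a placeholder:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CreateConnectionExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Unlike the old getConnection(conf), createConnection(conf) is never cached;
      // the caller is responsible for closing what it creates.
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        System.out.println("quorum = " + connection.getConfiguration().get("hbase.zookeeper.quorum"));
      }
    }
  }
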
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/bin/region_status.rb
----------------------------------------------------------------------
diff --git a/bin/region_status.rb b/bin/region_status.rb
index a016afd..52af49e 100644
--- a/bin/region_status.rb
+++ b/bin/region_status.rb
@@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.util.Bytes
 import org.apache.hadoop.hbase.HRegionInfo
 import org.apache.hadoop.hbase.MetaTableAccessor
 import org.apache.hadoop.hbase.HTableDescriptor
-import org.apache.hadoop.hbase.client.HConnectionManager
+import org.apache.hadoop.hbase.client.ConnectionFactory
 
 # disable debug logging on this script for clarity
 log_level = org.apache.log4j.Level::ERROR
@@ -138,7 +138,7 @@ while true
   if $tablename.nil?
     server_count = admin.getClusterStatus().getRegionsCount()
   else
-    connection = HConnectionManager::getConnection(config);
+    connection = ConnectionFactory::createConnection(config);
     server_count = MetaTableAccessor::allTableRegions(connection, $TableName).size()
   end
   print "Region Status: #{server_count} / #{meta_count}\n"

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/conf/log4j.properties b/conf/log4j.properties
index 40f47ba..7b0acc0 100644
--- a/conf/log4j.properties
+++ b/conf/log4j.properties
@@ -82,7 +82,7 @@ log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
 #log4j.logger.org.apache.hadoop.dfs=DEBUG
 # Set this class to log INFO only otherwise its OTT
 # Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=TRACE
 
 
 # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
@@ -90,4 +90,4 @@ log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
 
 # Uncomment the below if you want to remove logging of client region caching'
 # and scan of hbase:meta messages
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=INFO

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 89378dd..0b4b29a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.security.UserProvider;
  * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator}
  * implementations.
  *
- * This class replaces {@link HConnectionManager}, which is now deprecated.
  * @see Connection
  * @since 0.99.0
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 50e2755..00cd817 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -335,7 +335,7 @@ final class ConnectionManager {
    * This is the recommended way to create HConnections.
    * {@code
    * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
+   * HConnection connection = ConnectionManager.createConnection(conf, pool);
    * HTableInterface table = connection.getTable("mytable");
    * table.get(...);
    * ...
@@ -361,7 +361,7 @@ final class ConnectionManager {
    * This is the recommended way to create HConnections.
    * {@code
    * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
+   * HConnection connection = ConnectionManager.createConnection(conf, pool);
    * HTableInterface table = connection.getTable("mytable");
    * table.get(...);
    * ...
@@ -386,7 +386,7 @@ final class ConnectionManager {
    * This is the recommended way to create HConnections.
    * {@code
    * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
+   * HConnection connection = ConnectionManager.createConnection(conf, pool);
    * HTableInterface table = connection.getTable("mytable");
    * table.get(...);
    * ...
@@ -425,19 +425,6 @@ final class ConnectionManager {
   }
 
   /**
-   * Delete connection information for the instance specified by passed configuration.
-   * If there are no more references to the designated connection connection, this method will
-   * then close connection to the zookeeper ensemble and let go of all associated resources.
-   *
-   * @param conf configuration whose identity is used to find {@link HConnection} instance.
-   * @deprecated connection caching is going away.
-   */
-  @Deprecated
-  public static void deleteConnection(Configuration conf) {
-    deleteConnection(new HConnectionKey(conf), false);
-  }
-
-  /**
    * Cleanup a known stale connection.
    * This will then close connection to the zookeeper ensemble and let go of all resources.
    *
@@ -450,33 +437,6 @@ final class ConnectionManager {
   }
 
   /**
-   * Delete information for all connections. Close or not the connection, depending on the
-   *  staleConnection boolean and the ref count. By default, you should use it with
-   *  staleConnection to true.
-   * @deprecated connection caching is going away.
-   */
-  @Deprecated
-  public static void deleteAllConnections(boolean staleConnection) {
-    synchronized (CONNECTION_INSTANCES) {
-      Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
-      connectionKeys.addAll(CONNECTION_INSTANCES.keySet());
-      for (HConnectionKey connectionKey : connectionKeys) {
-        deleteConnection(connectionKey, staleConnection);
-      }
-      CONNECTION_INSTANCES.clear();
-    }
-  }
-
-  /**
-   * Delete information for all connections..
-   * @deprecated kept for backward compatibility, but the behavior is broken. HBASE-8983
-   */
-  @Deprecated
-  public static void deleteAllConnections() {
-    deleteAllConnections(false);
-  }
-
-  /**
    * @deprecated connection caching is going away.
    */
   @Deprecated

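With the deprecated deleteConnection/deleteAllConnections entry points removed above, there is no Configuration-keyed cache left to purge; cleanup reduces to closing the Connection (and Table) you opened. A minimal sketch of that lifecycle, assuming the ConnectionFactory API; table and row names are placeholders:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ConnectionLifecycleExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf("mytable"));
      try {
        table.get(new Get(Bytes.toBytes("somerow")));
      } finally {
        // Replaces the old deleteConnection(conf) cleanup: close what you opened.
        table.close();
        connection.close();
      }
    }
  }
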
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index f185cb2..e4f05b0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 /**
  * A cluster connection.  Knows how to find the master, locate regions out on the cluster,
  * keeps a cache of locations and then knows how to re-calibrate after they move.  You need one
- * of these to talk to your HBase cluster. {@link HConnectionManager} manages instances of this
+ * of these to talk to your HBase cluster. {@link ConnectionFactory} manages instances of this
  * class.  See it for how to get one of these.
  *
  * <p>This is NOT a connection to a particular server but to ALL servers in the cluster.  Individual
@@ -49,11 +49,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
  * HConnection instances can be shared.  Sharing
  * is usually what you want because rather than each HConnection instance
  * having to do its own discovery of regions out on the cluster, instead, all
- * clients get to share the one cache of locations.  {@link HConnectionManager} does the
+ * clients get to share the one cache of locations.  {@link ConnectionManager} does the
  * sharing for you if you go by it getting connections.  Sharing makes cleanup of
- * HConnections awkward.  See {@link HConnectionManager} for cleanup discussion.
+ * HConnections awkward.  See {@link ConnectionFactory} for cleanup discussion.
  *
- * @see HConnectionManager
+ * @see ConnectionManager
+ * @see ConnectionFactory
  * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
  */
 @InterfaceAudience.Public
@@ -79,7 +80,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @return an HTable to use for interactions with this table
    */
@@ -92,7 +93,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @return an HTable to use for interactions with this table
    */
@@ -105,7 +106,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @return an HTable to use for interactions with this table
    */
@@ -119,7 +120,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @param pool The thread pool to use for batch operations, null to use a default pool.
    * @return an HTable to use for interactions with this table
@@ -133,7 +134,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @param pool The thread pool to use for batch operations, null to use a default pool.
    * @return an HTable to use for interactions with this table
@@ -147,7 +148,7 @@ public interface HConnection extends Connection {
    * This is a lightweight operation, pooling or caching of the returned HTableInterface
    * is neither required nor desired.
    * Note that the HConnection needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    * @param tableName
    * @param pool The thread pool to use for batch operations, null to use a default pool.
    * @return an HTable to use for interactions with this table
@@ -163,7 +164,7 @@ public interface HConnection extends Connection {
    * required nor desired.
    *
    * RegionLocator needs to be unmanaged
-   * (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * (created with {@link ConnectionFactory#createConnection(Configuration)}).
    *
    * @param tableName Name of the table who's region is to be examined
    * @return A RegionLocator instance

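The HConnection javadoc above now points at ConnectionFactory for unmanaged connections; the getTable and getRegionLocator accessors are otherwise unchanged. A hedged sketch of the RegionLocator path mentioned in the last hunk, assuming the 1.0-era Connection interface; table and row names are placeholders:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionLocatorExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           RegionLocator locator = connection.getRegionLocator(TableName.valueOf("mytable"))) {
        HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("somerow"));
        System.out.println("row is served by " + location.getServerName());
      }
    }
  }
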
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
deleted file mode 100644
index edd071b..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import java.util.concurrent.ExecutorService;
-
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.security.User;
-
-/**
- * A non-instantiable class that manages creation of {@link HConnection}s.
- * <p>The simplest way to use this class is by using {@link #createConnection(Configuration)}.
- * This creates a new {@link HConnection} to the cluster that is managed by the caller.
- * From this {@link HConnection} {@link HTableInterface} implementations are retrieved
- * with {@link HConnection#getTable(byte[])}. Example:
- * <pre>
- * HConnection connection = HConnectionManager.createConnection(config);
- * HTableInterface table = connection.getTable(TableName.valueOf("table1"));
- * try {
- *   // Use the table as needed, for a single operation and a single thread
- * } finally {
- *   table.close();
- *   connection.close();
- * }
- * </pre>
- * <p>This class has a static Map of {@link HConnection} instances keyed by
- * {@link HConnectionKey}; A {@link HConnectionKey} is identified by a set of
- * {@link Configuration} properties. Invocations of {@link #getConnection(Configuration)}
- * that pass the same {@link Configuration} instance will return the same
- * {@link  HConnection} instance ONLY WHEN the set of properties are the same
- * (i.e. if you change properties in your {@link Configuration} instance, such as RPC timeout,
- * the codec used, HBase will create a new {@link HConnection} instance. For more details on
- * how this is done see {@link HConnectionKey}).
- * <p>Sharing {@link HConnection} instances is usually what you want; all clients
- * of the {@link HConnection} instances share the HConnections' cache of Region
- * locations rather than each having to discover for itself the location of meta, etc.
- * But sharing connections makes clean up of {@link HConnection} instances a little awkward.
- * Currently, clients cleanup by calling {@link #deleteConnection(Configuration)}. This will
- * shutdown the zookeeper connection the HConnection was using and clean up all
- * HConnection resources as well as stopping proxies to servers out on the
- * cluster. Not running the cleanup will not end the world; it'll
- * just stall the closeup some and spew some zookeeper connection failed
- * messages into the log.  Running the cleanup on a {@link HConnection} that is
- * subsequently used by another will cause breakage so be careful running
- * cleanup.
- * <p>To create a {@link HConnection} that is not shared by others, you can
- * set property "hbase.client.instance.id" to a unique value for your {@link Configuration}
- * instance, like the following:
- * <pre>
- * {@code
- * conf.set("hbase.client.instance.id", "12345");
- * HConnection connection = HConnectionManager.getConnection(conf);
- * // Use the connection to your hearts' delight and then when done...
- * conf.set("hbase.client.instance.id", "12345");
- * HConnectionManager.deleteConnection(conf, true);
- * }
- * </pre>
- * <p>Cleanup used to be done inside in a shutdown hook.  On startup we'd
- * register a shutdown hook that called {@link #deleteAllConnections()}
- * on its way out but the order in which shutdown hooks run is not defined so
- * were problematic for clients of HConnection that wanted to register their
- * own shutdown hooks so we removed ours though this shifts the onus for
- * cleanup to the client.
- * @deprecated Please use ConnectionFactory instead
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-@Deprecated
-public final class HConnectionManager extends ConnectionFactory {
-
-  /** @deprecated connection caching is going away */
-  @Deprecated
-  public static final String RETRIES_BY_SERVER_KEY =
-      ConnectionManager.RETRIES_BY_SERVER_KEY;
-
-  /** @deprecated connection caching is going away */
-  @Deprecated
-  public static final int MAX_CACHED_CONNECTION_INSTANCES =
-      ConnectionManager.MAX_CACHED_CONNECTION_INSTANCES;
-
-  /*
-   * Non-instantiable.
-   */
-  private HConnectionManager() {
-    super();
-  }
-
-  /**
-   * Get the connection that goes with the passed <code>conf</code> configuration instance.
-   * If no current connection exists, method creates a new connection and keys it using
-   * connection-specific properties from the passed {@link Configuration}; see
-   * {@link HConnectionKey}.
-   * @param conf configuration
-   * @return HConnection object for <code>conf</code>
-   * @deprecated connection caching is going away
-   */
-  @Deprecated
-  public static HConnection getConnection(final Configuration conf) throws IOException {
-    return ConnectionManager.getConnectionInternal(conf);
-  }
-
-  /**
-   * Create a new HConnection instance using the passed <code>conf</code> instance.
-   * <p>Note: This bypasses the usual HConnection life cycle management done by
-   * {@link #getConnection(Configuration)}. The caller is responsible for
-   * calling {@link HConnection#close()} on the returned connection instance.
-   *
-   * This is the recommended way to create HConnections.
-   * <pre>
-   * HConnection connection = HConnectionManager.createConnection(conf);
-   * HTableInterface table = connection.getTable("mytable");
-   * try {
-   *   table.get(...);
-   *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
-   * }
-   * </pre>
-   *
-   * @param conf configuration
-   * @return HConnection object for <code>conf</code>
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  public static HConnection createConnection(Configuration conf) throws IOException {
-    return ConnectionManager.createConnectionInternal(conf);
-  }
-
-
-  /**
-   * Create a new HConnection instance using the passed <code>conf</code> instance.
-   * <p>Note: This bypasses the usual HConnection life cycle management done by
-   * {@link #getConnection(Configuration)}. The caller is responsible for
-   * calling {@link HConnection#close()} on the returned connection instance.
-   * This is the recommended way to create HConnections.
-   * <pre>
-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   * </pre>
-   * @param conf configuration
-   * @param pool the thread pool to use for batch operation in HTables used via this HConnection
-   * @return HConnection object for <code>conf</code>
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  public static HConnection createConnection(Configuration conf, ExecutorService pool)
-      throws IOException {
-    return ConnectionManager.createConnection(conf, pool);
-  }
-
-  /**
-   * Create a new HConnection instance using the passed <code>conf</code> instance.
-   * <p>Note: This bypasses the usual HConnection life cycle management done by
-   * {@link #getConnection(Configuration)}. The caller is responsible for
-   * calling {@link HConnection#close()} on the returned connection instance.
-   * This is the recommended way to create HConnections.
-   * <pre>
-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   * </pre>
-   * @param conf configuration
-   * @param user the user the connection is for
-   * @return HConnection object for <code>conf</code>
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  public static HConnection createConnection(Configuration conf, User user)
-  throws IOException {
-    return ConnectionManager.createConnection(conf, user);
-  }
-
-  /**
-   * Create a new HConnection instance using the passed <code>conf</code> instance.
-   * <p>Note: This bypasses the usual HConnection life cycle management done by
-   * {@link #getConnection(Configuration)}. The caller is responsible for
-   * calling {@link HConnection#close()} on the returned connection instance.
-   * This is the recommended way to create HConnections.
-   * <pre>
-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   * </pre>
-   * @param conf configuration
-   * @param pool the thread pool to use for batch operation in HTables used via this HConnection
-   * @param user the user the connection is for
-   * @return HConnection object for <code>conf</code>
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  public static HConnection createConnection(Configuration conf, ExecutorService pool, User user)
-  throws IOException {
-    return ConnectionManager.createConnection(conf, pool, user);
-  }
-
-  /**
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  static HConnection createConnection(final Configuration conf, final boolean managed)
-      throws IOException {
-    return ConnectionManager.createConnection(conf, managed);
-  }
-
-  /**
-   * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
-   */
-  @Deprecated
-  static ClusterConnection createConnection(final Configuration conf, final boolean managed,
-      final ExecutorService pool, final User user) throws IOException {
-    return ConnectionManager.createConnection(conf, managed, pool, user);
-  }
-
-  /**
-   * Delete connection information for the instance specified by passed configuration.
-   * If there are no more references to the designated connection connection, this method will
-   * then close connection to the zookeeper ensemble and let go of all associated resources.
-   *
-   * @param conf configuration whose identity is used to find {@link HConnection} instance.
-   * @deprecated connection caching is going away.
-   */
-  @Deprecated
-  public static void deleteConnection(Configuration conf) {
-    ConnectionManager.deleteConnection(conf);
-  }
-
-  /**
-   * Cleanup a known stale connection.
-   * This will then close connection to the zookeeper ensemble and let go of all resources.
-   *
-   * @param connection
-   * @deprecated connection caching is going away.
-   */
-  @Deprecated
-  public static void deleteStaleConnection(HConnection connection) {
-    ConnectionManager.deleteStaleConnection(connection);
-  }
-
-  /**
-   * Delete information for all connections. Close or not the connection, depending on the
-   *  staleConnection boolean and the ref count. By default, you should use it with
-   *  staleConnection to true.
-   * @deprecated connection caching is going away.
-   */
-  @Deprecated
-  public static void deleteAllConnections(boolean staleConnection) {
-    ConnectionManager.deleteAllConnections(staleConnection);
-  }
-
-  /**
-   * Delete information for all connections..
-   * @deprecated kept for backward compatibility, but the behavior is broken. HBASE-8983
-   */
-  @Deprecated
-  public static void deleteAllConnections() {
-    ConnectionManager.deleteAllConnections();
-  }
-
-  /**
-   * This convenience method invokes the given {@link HConnectable#connect}
-   * implementation using a {@link HConnection} instance that lasts just for the
-   * duration of the invocation.
-   *
-   * @param <T> the return type of the connect method
-   * @param connectable the {@link HConnectable} instance
-   * @return the value returned by the connect method
-   * @throws IOException
-   * @deprecated Internal method, do not use thru HConnectionManager.
-   */
-  @InterfaceAudience.Private
-  @Deprecated
-  public static <T> T execute(HConnectable<T> connectable) throws IOException {
-    return ConnectionManager.execute(connectable);
-  }
-
-  /**
-   * Set the number of retries to use serverside when trying to communicate
-   * with another server over {@link HConnection}.  Used updating catalog
-   * tables, etc.  Call this method before we create any Connections.
-   * @param c The Configuration instance to set the retries into.
-   * @param log Used to log what we set in here.
-   * @deprecated Internal method, do not use.
-   */
-  @InterfaceAudience.Private
-  @Deprecated
-  public static void setServerSideHConnectionRetries(
-      final Configuration c, final String sn, final Log log) {
-    ConnectionUtils.setServerSideHConnectionRetriesConfig(c, sn, log);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 2a8063a..c77e2ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -319,7 +319,7 @@ public class HTable implements HTableInterface {
   @Deprecated
   public static boolean isTableEnabled(Configuration conf,
       final TableName tableName) throws IOException {
-    return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
+    return ConnectionManager.execute(new HConnectable<Boolean>(conf) {
       @Override
       public Boolean connect(HConnection connection) throws IOException {
         return connection.isTableEnabled(tableName);

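The HTable hunk above keeps the deprecated static isTableEnabled helper working by routing it to the package-private ConnectionManager.execute. Code outside the client package loses the HConnectable callback; the usual substitute is an Admin call over a short-lived connection. A minimal sketch, not part of this commit; the table name is a placeholder:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class IsTableEnabledExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        System.out.println("enabled: " + admin.isTableEnabled(TableName.valueOf("mytable")));
      }
    }
  }
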
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
index b44803b..6110f0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Container for Actions (i.e. Get, Delete, or Put), which are grouped by
- * regionName. Intended to be used with HConnectionManager.processBatch()
+ * regionName. Intended to be used with ConnectionManager.processBatch()
  */
 @InterfaceAudience.Private
 public final class MultiAction<R> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index 4a57adf..a9c903e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -129,7 +129,7 @@ public class ReversedScannerCallable extends ScannerCallable {
     }
 
     // check how often we retry.
-    // HConnectionManager will call instantiateServer with reload==true
+    // ConnectionManager will call instantiateServer with reload==true
     // if and only if for retries.
     if (reload && this.scanMetrics != null) {
       this.scanMetrics.countOfRPCRetries.incrementAndGet();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 6d5bb9e..226782c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -153,7 +153,7 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
     }
 
     // check how often we retry.
-    // HConnectionManager will call instantiateServer with reload==true
+    // ConnectionManager will call instantiateServer with reload==true
     // if and only if for retries.
     if (reload && this.scanMetrics != null) {
       this.scanMetrics.countOfRPCRetries.incrementAndGet();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
index 49134f1..43a4ee4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 * Thrown when the client believes that we are trying to communicate to has
 * been repeatedly unresponsive for a while.
 *
-* On receiving such an exception. The HConnectionManager will skip all
+* On receiving such an exception. The ConnectionManager will skip all
 * retries and fast fail the operation.
 */
 @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
index 51c960d..6ca1d88 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.ServerName;
  * Thrown when the client believes that we are trying to communicate to has
  * been repeatedly unresponsive for a while.
  *
- * On receiving such an exception. The HConnectionManager will skip all
+ * On receiving such an exception. The ConnectionManager will skip all
  * retries and fast fail the operation.
  */
  @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-client/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/resources/log4j.properties b/hbase-client/src/test/resources/log4j.properties
index 6ee91ef..69171f7 100644
--- a/hbase-client/src/test/resources/log4j.properties
+++ b/hbase-client/src/test/resources/log4j.properties
@@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG
 log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
 log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR
 # Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=TRACE

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-common/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/resources/log4j.properties b/hbase-common/src/test/resources/log4j.properties
index 6ee91ef..69171f7 100644
--- a/hbase-common/src/test/resources/log4j.properties
+++ b/hbase-common/src/test/resources/log4j.properties
@@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG
 log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
 log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR
 # Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=TRACE

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index 9d07479..b82c750 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -450,7 +449,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
     @Override
     protected void handleFailure(Counters counters) throws IOException {
       Configuration conf = job.getConfiguration();
-      HConnection conn = HConnectionManager.getConnection(conf);
+      HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
       TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
       CounterGroup g = counters.getGroup("undef");
       Iterator<Counter> it = g.iterator();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-rest/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/log4j.properties b/hbase-rest/src/test/resources/log4j.properties
index 6ee91ef..69171f7 100644
--- a/hbase-rest/src/test/resources/log4j.properties
+++ b/hbase-rest/src/test/resources/log4j.properties
@@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG
 log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
 log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR
 # Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=TRACE

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index ceda843..b3445d4 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -42,7 +42,6 @@ org.apache.hadoop.hbase.NamespaceDescriptor;
 org.apache.hadoop.hbase.ServerLoad;
 org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.client.Admin;
-org.apache.hadoop.hbase.client.HConnectionManager;
 org.apache.hadoop.hbase.HRegionInfo;
 org.apache.hadoop.hbase.master.RegionState;
 org.apache.hadoop.hbase.HTableDescriptor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index f063e74..49addc7 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -34,7 +34,6 @@ HMaster master;
         org.apache.hadoop.hbase.ServerLoad;
         org.apache.hadoop.hbase.ServerName;
         org.apache.hadoop.hbase.client.HBaseAdmin;
-        org.apache.hadoop.hbase.client.HConnectionManager;
         org.apache.hadoop.hbase.HTableDescriptor;
         org.apache.hadoop.hbase.HBaseConfiguration;
 </%import>

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index eacba6f..17fc34f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -173,7 +173,7 @@ public class LocalHBaseCluster {
   throws IOException {
     // Create each regionserver with its own Configuration instance so each has
     // its HConnection instance rather than share (see HBASE_INSTANCES down in
-    // the guts of HConnectionManager.
+    // the guts of ConnectionManager).
 
     // Also, create separate CoordinatedStateManager instance per Server.
     // This is special case when we have to have more than 1 CoordinatedStateManager
@@ -206,7 +206,7 @@ public class LocalHBaseCluster {
   throws IOException {
     // Create each master with its own Configuration instance so each has
     // its HConnection instance rather than share (see HBASE_INSTANCES down in
-    // the guts of HConnectionManager.
+    // the guts of ConnectionManager.
 
     // Also, create separate CoordinatedStateManager instance per Server.
     // This is special case when we have to have more than 1 CoordinatedStateManager

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index b6d43de..28f9f39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -30,9 +30,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnectable;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -129,22 +126,17 @@ public class VerifyReplication extends Configured implements Tool {
         }
 
         final TableSplit tableSplit = (TableSplit)(context.getInputSplit());
-        HConnectionManager.execute(new HConnectable<Void>(conf) {
-          @Override
-          public Void connect(HConnection conn) throws IOException {
-            String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
-            Configuration peerConf = HBaseConfiguration.create(conf);
-            ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);
-
-            TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
-            connection = ConnectionFactory.createConnection(peerConf);
-            replicatedTable = connection.getTable(tableName);
-            scan.setStartRow(value.getRow());
-            scan.setStopRow(tableSplit.getEndRow());
-            replicatedScanner = replicatedTable.getScanner(scan);
-            return null;
-          }
-        });
+
+        String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
+        Configuration peerConf = HBaseConfiguration.create(conf);
+        ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);
+
+        TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
+        connection = ConnectionFactory.createConnection(peerConf);
+        replicatedTable = connection.getTable(tableName);
+        scan.setStartRow(value.getRow());
+        scan.setStopRow(tableSplit.getEndRow());
+        replicatedScanner = replicatedTable.getScanner(scan);
         currentCompareRowInPeerTable = replicatedScanner.next();
       }
       while (true) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 95c253d..1a53c24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -82,7 +82,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     // TODO: This connection is replication specific or we should make it particular to
     // replication and make replication specific settings such as compression or codec to use
     // passing Cells.
-    this.conn = HConnectionManager.createConnection(this.conf);
+    this.conn = (HConnection) ConnectionFactory.createConnection(this.conf);
     this.sleepForRetries =
         this.conf.getLong("replication.source.sleepforretries", 1000);
     this.metrics = context.getMetrics();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
index 8fa711c..bbfed4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
@@ -27,10 +27,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -148,7 +149,7 @@ public class ConnectionCache {
    */
   public Table getTable(String tableName) throws IOException {
     ConnectionInfo connInfo = getCurrentConnection();
-    return connInfo.connection.getTable(tableName);
+    return connInfo.connection.getTable(TableName.valueOf(tableName));
   }
 
   /**
@@ -168,7 +169,7 @@ public class ConnectionCache {
             ugi = UserGroupInformation.createProxyUser(userName, realUser);
           }
           User user = userProvider.create(ugi);
-          HConnection conn = HConnectionManager.createConnection(conf, user);
+          Connection conn = ConnectionFactory.createConnection(conf, user);
           connInfo = new ConnectionInfo(conn, userName);
           connections.put(userName, connInfo);
         }
@@ -180,14 +181,14 @@ public class ConnectionCache {
   }
 
   class ConnectionInfo {
-    final HConnection connection;
+    final Connection connection;
     final String userName;
 
     volatile HBaseAdmin admin;
     private long lastAccessTime;
     private boolean closed;
 
-    ConnectionInfo(HConnection conn, String user) {
+    ConnectionInfo(Connection conn, String user) {
       lastAccessTime = EnvironmentEdgeManager.currentTime();
       connection = conn;
       closed = false;

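The ConnectionCache hunks above switch the per-user connections to ConnectionFactory.createConnection(conf, user) and type the cache against the Connection interface. A hedged sketch of that per-user variant, assuming UserProvider is used only to obtain the current user; the class name is a placeholder:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.security.User;
  import org.apache.hadoop.hbase.security.UserProvider;

  public class PerUserConnectionExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      User user = UserProvider.instantiate(conf).getCurrent();
      // Each call creates a new connection bound to the given user; close it when done.
      try (Connection connection = ConnectionFactory.createConnection(conf, user)) {
        System.out.println("connection opened for " + user.getName());
      }
    }
  }
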
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 78c7a06..96e0d48 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -41,9 +41,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
@@ -106,14 +104,16 @@ class HMerge {
     final TableName tableName, final boolean testMasterRunning)
   throws IOException {
     boolean masterIsRunning = false;
+    HConnection hConnection = null;
     if (testMasterRunning) {
-      masterIsRunning = HConnectionManager
-          .execute(new HConnectable<Boolean>(conf) {
-            @Override
-            public Boolean connect(HConnection connection) throws IOException {
-              return connection.isMasterRunning();
-            }
-          });
+      try {
+        hConnection = (HConnection) ConnectionFactory.createConnection(conf);
+        masterIsRunning = hConnection.isMasterRunning();
+      } finally {
+        if (hConnection != null) {
+          hConnection.close();
+        }
+      }
     }
     if (tableName.equals(TableName.META_TABLE_NAME)) {
       if (masterIsRunning) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 81678aa..ba76c6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 
@@ -64,7 +64,7 @@ public class MultiHConnection {
     synchronized (this.hConnectionsLock) {
       hConnections = new HConnection[noOfConnections];
       for (int i = 0; i < noOfConnections; i++) {
-        HConnection conn = HConnectionManager.createConnection(conf);
+        HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
         hConnections[i] = conn;
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index e3e6bc4..517b67e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -70,11 +70,12 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.TableState;
@@ -2140,7 +2141,7 @@ public class WALSplitter {
         synchronized (this.tableNameToHConnectionMap) {
           hconn = this.tableNameToHConnectionMap.get(tableName);
           if (hconn == null) {
-            hconn = HConnectionManager.getConnection(conf);
+            hconn = (HConnection) ConnectionFactory.createConnection(conf);
             this.tableNameToHConnectionMap.put(tableName, hconn);
           }
         }

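The WALSplitter hunk above keeps its per-table connection cache but fills it from ConnectionFactory.createConnection instead of the removed HConnectionManager.getConnection. A minimal sketch, with illustrative names, of the same lazy lookup under a lock:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class PerTableConnections {
  private final Map<TableName, Connection> tableNameToConnectionMap = new HashMap<TableName, Connection>();
  private final Configuration conf;

  public PerTableConnections(Configuration conf) {
    this.conf = conf;
  }

  // Return the cached connection for this table, creating it on first use.
  public Connection getConnectionByTableName(TableName tableName) throws IOException {
    synchronized (tableNameToConnectionMap) {
      Connection conn = tableNameToConnectionMap.get(tableName);
      if (conn == null) {
        conn = ConnectionFactory.createConnection(conf);
        tableNameToConnectionMap.put(tableName, conn);
      }
      return conn;
    }
  }
}
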
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index 831835e..50a7560 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -21,7 +21,6 @@
   import="java.util.Date"
   import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.client.Admin"
-  import="org.apache.hadoop.hbase.client.HConnectionManager"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
   import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
index c9d984f..23423fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -285,9 +284,9 @@ public class TestZooKeeper {
 
     // make sure they aren't the same
     ZooKeeperWatcher z1 =
-      getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration()));
+      getZooKeeperWatcher(ConnectionFactory.createConnection(localMeta.getConfiguration()));
     ZooKeeperWatcher z2 =
-      getZooKeeperWatcher(HConnectionManager.getConnection(otherConf));
+      getZooKeeperWatcher(ConnectionFactory.createConnection(otherConf));
     assertFalse(z1 == z2);
     assertFalse(z1.getQuorum().equals(z2.getQuorum()));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 0d05c68..b8d26fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -39,15 +39,14 @@ public class HConnectionTestingUtility {
   /*
    * Not part of {@link HBaseTestingUtility} because this class is not
    * in same package as {@link HConnection}.  Would have to reveal ugly
-   * {@link HConnectionManager} innards to HBaseTestingUtility to give it access.
+   * {@link ConnectionManager} innards to HBaseTestingUtility to give it access.
    */
   /**
    * Get a Mocked {@link HConnection} that goes with the passed <code>conf</code>
    * configuration instance.  Minimally the mock will return
   * <code>conf</code> when {@link ClusterConnection#getConfiguration()} is invoked.
    * Be sure to shutdown the connection when done by calling
-   * {@link HConnectionManager#deleteConnection(Configuration)} else it
-   * will stick around; this is probably not what you want.
+   * {@link Connection#close()} else it will stick around; this is probably not what you want.
    * @param conf configuration
    * @return HConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException
@@ -71,9 +70,8 @@ public class HConnectionTestingUtility {
    * Calls {@link #getMockedConnection(Configuration)} and then mocks a few
    * more of the popular {@link ClusterConnection} methods so they do 'normal'
    * operation (see return doc below for list). Be sure to shutdown the
-   * connection when done by calling
-   * {@link HConnectionManager#deleteConnection(Configuration)} else it
-   * will stick around; this is probably not what you want.
+   * connection when done by calling {@link Connection#close()} else it will stick around;
+   * this is probably not what you want.
    *
    * @param conf Configuration to use
    * @param admin An AdminProtocol; can be null but is usually
@@ -92,8 +90,7 @@ public class HConnectionTestingUtility {
    * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed
    * {@link ClientProtos.ClientService.BlockingInterface} instance when
    * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call
-   * {@link HConnectionManager#deleteConnection(Configuration)}
-   * when done with this mocked Connection.
+   * {@link Connection#close()} when done with this mocked Connection.
    * @throws IOException
    */
   public static ClusterConnection getMockedConnectionAndDecorate(final Configuration conf,
@@ -146,8 +143,7 @@ public class HConnectionTestingUtility {
    * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed
    * <code>conf</code> configuration instance.
    * Be sure to shutdown the connection when done by calling
-   * {@link HConnectionManager#deleteConnection(Configuration)} else it
-   * will stick around; this is probably not what you want.
+   * {@link Connection#close()} else it will stick around; this is probably not what you want.
    * @param conf configuration
    * @return HConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException

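The javadoc edits above all make the same point: with HConnectionManager#deleteConnection gone, the caller releases a connection by calling Connection#close(). A minimal sketch of that cleanup in a test, assuming an ordinary (non-mocked) connection:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloseInsteadOfDelete {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Connection connection = ConnectionFactory.createConnection(conf);
    try {
      // ... exercise the connection ...
    } finally {
      connection.close();  // replaces the old deleteConnection(conf) cleanup
    }
  }
}
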
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index b3c631a..a4ceaa8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -285,96 +285,6 @@ public class TestFromClientSide {
      table.close();
    }
 
-   /**
-    * @deprecated Tests deprecated functionality. Remove when we are past 1.0.
-    * @throws Exception
-    */
-   @Deprecated
-   @Test
-   public void testSharedZooKeeper() throws Exception {
-     Configuration newConfig = new Configuration(TEST_UTIL.getConfiguration());
-     newConfig.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "12345");
-
-     // First with a simple ZKW
-     ZooKeeperWatcher z0 = new ZooKeeperWatcher(
-       newConfig, "hconnection", new Abortable() {
-       @Override public void abort(String why, Throwable e) {}
-       @Override public boolean isAborted() {return false;}
-     });
-     z0.getRecoverableZooKeeper().getZooKeeper().exists("/oldZooKeeperWatcher", false);
-     z0.close();
-
-     // Then a ZooKeeperKeepAliveConnection
-     ConnectionManager.HConnectionImplementation connection1 =
-       (ConnectionManager.HConnectionImplementation)
-         HConnectionManager.getConnection(newConfig);
-
-     ZooKeeperKeepAliveConnection z1 = connection1.getKeepAliveZooKeeperWatcher();
-     z1.getRecoverableZooKeeper().getZooKeeper().exists("/z1", false);
-
-     z1.close();
-
-     // will still work, because the real connection is not closed yet
-     // Not do be done in real code
-     z1.getRecoverableZooKeeper().getZooKeeper().exists("/z1afterclose", false);
-
-
-     ZooKeeperKeepAliveConnection z2 = connection1.getKeepAliveZooKeeperWatcher();
-     assertTrue(
-       "ZooKeeperKeepAliveConnection equals on same connection", z1 == z2);
-
-
-
-     Configuration newConfig2 = new Configuration(TEST_UTIL.getConfiguration());
-     newConfig2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "6789");
-     ConnectionManager.HConnectionImplementation connection2 =
-       (ConnectionManager.HConnectionImplementation)
-         HConnectionManager.getConnection(newConfig2);
-
-     assertTrue("connections should be different ", connection1 != connection2);
-
-     ZooKeeperKeepAliveConnection z3 = connection2.getKeepAliveZooKeeperWatcher();
-     assertTrue(
-       "ZooKeeperKeepAliveConnection should be different" +
-         " on different connections", z1 != z3);
-
-     // Bypass the private access
-     Method m = ConnectionManager.HConnectionImplementation.class.
-       getDeclaredMethod("closeZooKeeperWatcher");
-     m.setAccessible(true);
-     m.invoke(connection2);
-
-     ZooKeeperKeepAliveConnection z4 = connection2.getKeepAliveZooKeeperWatcher();
-     assertTrue(
-       "ZooKeeperKeepAliveConnection should be recreated" +
-         " when previous connections was closed"
-       , z3 != z4);
-
-
-     z2.getRecoverableZooKeeper().getZooKeeper().exists("/z2", false);
-     z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false);
-
-
-     HConnectionManager.deleteConnection(newConfig);
-     try {
-       z2.getRecoverableZooKeeper().getZooKeeper().exists("/z2", false);
-       assertTrue("We should not have a valid connection for z2", false);
-     } catch (Exception e){
-     }
-
-     z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false);
-     // We expect success here.
-
-
-     HConnectionManager.deleteConnection(newConfig2);
-     try {
-       z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false);
-       assertTrue("We should not have a valid connection for z4", false);
-     } catch (Exception e){
-     }
-   }
-
-
   /**
    * Verifies that getConfiguration returns the same Configuration object used
    * to create the HTable instance.
@@ -4127,7 +4037,7 @@ public class TestFromClientSide {
    */
   HTable createUnmangedHConnectionHTable(final TableName tableName) throws IOException {
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-    HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+    HConnection conn = ConnectionManager.createConnection(TEST_UTIL.getConfiguration());
     return (HTable)conn.getTable(tableName);
   }
 

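createUnmangedHConnectionHTable now builds its own connection rather than borrowing a cached one, so the caller owns both the connection and the table handed back. A minimal sketch of that ownership, using a hypothetical table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class UnmanagedConnectionTable {
  public static void main(String[] args) throws Exception {
    // The caller creates the connection itself and must close it when done.
    Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
    Table table = conn.getTable(TableName.valueOf("testTable"));  // hypothetical table
    try {
      // ... reads and writes against table ...
    } finally {
      table.close();
      conn.close();
    }
  }
}
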
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 5d284a2..94c32c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -155,8 +155,8 @@ public class TestHCM {
         new SynchronousQueue<Runnable>(),
         Threads.newDaemonThreadFactory("test-hcm"));
 
-    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
-    HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
+    HConnection con1 = ConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+    HConnection con2 = ConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
     // make sure the internally created ExecutorService is the one passed
     assertTrue(otherPool == ((HConnectionImplementation)con2).getCurrentBatchPool());
 
@@ -537,7 +537,7 @@ public class TestHCM {
     } finally {
       syncBlockingFilter.set(true);
       t.join();
-      HConnectionManager.getConnection(c2).close();
+      ConnectionManager.getConnection(c2).close();
       TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance, true);
     }
 
@@ -580,11 +580,11 @@ public class TestHCM {
     ConnectionManager.CONNECTION_INSTANCES.clear();
 
     try {
-      HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration());
+      HConnection connection = ConnectionManager.getConnection(TEST_UTIL.getConfiguration());
       connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception(
           "test abortingHConnectionRemovesItselfFromHCM"));
       Assert.assertNotSame(connection,
-        HConnectionManager.getConnection(TEST_UTIL.getConfiguration()));
+        ConnectionManager.getConnection(TEST_UTIL.getConfiguration()));
     } finally {
       // Put original HConnections back
       ConnectionManager.CONNECTION_INSTANCES.clear();
@@ -861,7 +861,7 @@ public class TestHCM {
       configuration.set("some_key", String.valueOf(_randy.nextInt()));
       LOG.info("The hash code of the current configuration is: "
           + configuration.hashCode());
-      Connection currentConnection = HConnectionManager
+      Connection currentConnection = ConnectionManager
           .getConnection(configuration);
       if (previousConnection != null) {
         assertTrue(
@@ -877,59 +877,6 @@ public class TestHCM {
     }
   }
 
-  /**
-   * Makes sure that there is no leaking of
-   * {@link ConnectionManager.HConnectionImplementation} in the {@link HConnectionManager}
-   * class.
-   * @deprecated Tests deprecated functionality.  Remove in 1.0.
-   */
-  @Deprecated
-  @Test
-  public void testConnectionUniqueness() throws Exception {
-    int zkmaxconnections = TEST_UTIL.getConfiguration().
-      getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS,
-          HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS);
-    // Test up to a max that is < the maximum number of zk connections.  If we
-    // go above zk connections, we just fall into cycle where we are failing
-    // to set up a session and test runs for a long time.
-    int maxConnections = Math.min(zkmaxconnections - 1, 20);
-    List<HConnection> connections = new ArrayList<HConnection>(maxConnections);
-    Connection previousConnection = null;
-    try {
-      for (int i = 0; i < maxConnections; i++) {
-        // set random key to differentiate the connection from previous ones
-        Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
-        configuration.set("some_key", String.valueOf(_randy.nextInt()));
-        configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
-            String.valueOf(_randy.nextInt()));
-        LOG.info("The hash code of the current configuration is: "
-            + configuration.hashCode());
-        HConnection currentConnection =
-          HConnectionManager.getConnection(configuration);
-        if (previousConnection != null) {
-          assertTrue("Got the same connection even though its key changed!",
-              previousConnection != currentConnection);
-        }
-        // change the configuration, so that it is no longer reachable from the
-        // client's perspective. However, since its part of the LRU doubly linked
-        // list, it will eventually get thrown out, at which time it should also
-        // close the corresponding {@link HConnection}.
-        configuration.set("other_key", String.valueOf(_randy.nextInt()));
-
-        previousConnection = currentConnection;
-        LOG.info("The current HConnectionManager#HBASE_INSTANCES cache size is: "
-            + getHConnectionManagerCacheSize());
-        Thread.sleep(50);
-        connections.add(currentConnection);
-      }
-    } finally {
-      for (Connection c: connections) {
-        // Clean up connections made so we don't interfere w/ subsequent tests.
-        HConnectionManager.deleteConnection(c.getConfiguration());
-      }
-    }
-  }
-
   @Test
   public void testClosing() throws Exception {
     Configuration configuration =
@@ -937,36 +884,26 @@ public class TestHCM {
     configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
         String.valueOf(_randy.nextInt()));
 
+    // as connection caching is going away, now we're just testing
+    // that closed connection does actually get closed.
+
     Connection c1 = ConnectionFactory.createConnection(configuration);
-    // We create two connections with the same key.
     Connection c2 = ConnectionFactory.createConnection(configuration);
+    // no caching, different connections
+    assertTrue(c1 != c2);
 
-    Connection c3 = HConnectionManager.getConnection(configuration);
-    Connection c4 = HConnectionManager.getConnection(configuration);
-    assertTrue(c3 == c4);
-
+    // closing independently
     c1.close();
     assertTrue(c1.isClosed());
     assertFalse(c2.isClosed());
-    assertFalse(c3.isClosed());
 
-    c3.close();
-    // still a reference left
-    assertTrue(c3.isClosed());
-    
-    Connection c5 = HConnectionManager.getConnection(configuration);
-    assertTrue(c5 != c3);
-
-    assertFalse(c2.isClosed());
     c2.close();
     assertTrue(c2.isClosed());
-    c5.close();
-    assertTrue(c5.isClosed());
   }
 
   /**
    * Trivial test to verify that nobody messes with
-   * {@link HConnectionManager#createConnection(Configuration)}
+   * {@link ConnectionFactory#createConnection(Configuration)}
    */
   @Test
   public void testCreateConnection() throws Exception {
@@ -977,7 +914,7 @@ public class TestHCM {
     assertTrue(c1 != c2);
     assertTrue(c1.getConfiguration() == c2.getConfiguration());
     // make sure these were not cached
-    Connection c3 = HConnectionManager.getConnection(configuration);
+    Connection c3 = ConnectionManager.getConnection(configuration);
     assertTrue(c1 != c3);
     assertTrue(c2 != c3);
   }
@@ -998,7 +935,7 @@ public class TestHCM {
       TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT));
 
     // This should be enough to connect
-    HConnection conn = HConnectionManager.getConnection(c);
+    HConnection conn = ConnectionManager.getConnection(c);
     assertTrue( conn.isMasterRunning() );
     conn.close();
   }
@@ -1222,9 +1159,9 @@ public class TestHCM {
       public void run() {
         while (!Thread.interrupted()) {
           try {
-            HConnection conn = HConnectionManager.getConnection(config);
+            HConnection conn = ConnectionManager.getConnection(config);
             LOG.info("Connection " + conn);
-            HConnectionManager.deleteStaleConnection(conn);
+            ConnectionManager.deleteStaleConnection(conn);
             LOG.info("Connection closed " + conn);
             // TODO: This sleep time should be less than the time that it takes to open and close
             // a table.  Ideally we would do a few runs first to measure.  For now this is

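The rewritten testClosing documents the new semantics: without the connection cache, two createConnection calls on the same Configuration return distinct connections, and closing one has no effect on the other. A minimal standalone sketch of those assertions (plain assert statements stand in for the test's assertTrue calls):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class IndependentConnections {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Connection c1 = ConnectionFactory.createConnection(conf);
    Connection c2 = ConnectionFactory.createConnection(conf);
    assert c1 != c2;          // no caching: different instances for the same conf
    c1.close();
    assert c1.isClosed();
    assert !c2.isClosed();    // c2 is unaffected by closing c1
    c2.close();
  }
}
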
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index a7025c6..30a0e88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -107,8 +106,7 @@ public class TestEndToEndSplitTransaction {
     byte []firstRow = Bytes.toBytes("aaa");
     byte []splitRow = Bytes.toBytes("lll");
     byte []lastRow = Bytes.toBytes("zzz");
-    HConnection con = HConnectionManager
-        .getConnection(TEST_UTIL.getConfiguration());
+    HConnection con = (HConnection) ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     // this will also cache the region
     byte[] regionName = con.locateRegion(tableName, splitRow).getRegionInfo()
         .getRegionName();

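The TestEndToEndSplitTransaction hunk creates its own connection before resolving the region that holds the split row. A minimal sketch of that lookup with a hypothetical table name; the cast to HConnection is only needed because locateRegion still lives on the legacy interface here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      HConnection con = (HConnection) connection;
      // Resolve the region containing the row "lll" in a hypothetical table.
      byte[] regionName = con.locateRegion(TableName.valueOf("testTable"), Bytes.toBytes("lll"))
          .getRegionInfo().getRegionName();
      System.out.println(Bytes.toStringBinary(regionName));
    }
  }
}
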
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 5b04ab9..26f2db9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -155,7 +155,7 @@ public abstract class MultiThreadedAction {
     this.dataGenerator = dataGen;
     this.tableName = tableName;
     this.actionLetter = actionLetter;
-    this.connection = HConnectionManager.createConnection(conf);
+    this.connection = (HConnection) ConnectionFactory.createConnection(conf);
   }
 
   public void start(long startKey, long endKey, int numThreads) throws IOException {

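MultiThreadedAction now creates its connection via ConnectionFactory in the constructor, which means the action owns that connection's lifecycle. A minimal sketch, with illustrative names, of a class that creates a connection up front and closes it when finished:

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionOwningAction implements Closeable {
  private final Connection connection;
  private final TableName tableName;

  public ConnectionOwningAction(Configuration conf, TableName tableName) throws IOException {
    // The action creates and therefore owns this connection.
    this.connection = ConnectionFactory.createConnection(conf);
    this.tableName = tableName;
  }

  public void run() throws IOException {
    // ... worker threads would use connection.getTable(tableName) here ...
  }

  @Override
  public void close() throws IOException {
    connection.close();  // release the connection when the workload is done
  }
}
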
http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
index fc22292..d4f86e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.HBaseFsck;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f57dca5e/hbase-server/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/resources/log4j.properties b/hbase-server/src/test/resources/log4j.properties
index 6ee91ef..69171f7 100644
--- a/hbase-server/src/test/resources/log4j.properties
+++ b/hbase-server/src/test/resources/log4j.properties
@@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG
 log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
 log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR
 # Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation=TRACE

