hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject [1/3] hbase git commit: HBASE-15610 Remove deprecated HConnection for 2.0 thus removing all PB references for 2.0
Date Sun, 29 May 2016 14:52:09 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 74442fde0 -> cdd532da8


http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 3ebf31e..da0b9a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -186,7 +186,7 @@ public class TestMasterNoCluster {
     CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
       TESTUTIL.getConfiguration());
     // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
-    // the conf from the master; the conf will already have an HConnection
+    // the conf from the master; the conf will already have a ClusterConnection
     // associate so the below mocking of a connection will fail.
     final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(
         TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(),
@@ -293,7 +293,7 @@ public class TestMasterNoCluster {
       @Override
       public ClusterConnection getConnection() {
         // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
-        // the conf from the master; the conf will already have an HConnection
+        // the conf from the master; the conf will already have a Connection
         // associate so the below mocking of a connection will fail.
         try {
           return HConnectionTestingUtility.getMockedConnectionAndDecorate(

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 692b5a0..7c41c0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -86,8 +86,8 @@ public class TestRestartCluster {
     LOG.info("\n\nStarting cluster the second time");
     UTIL.restartHBaseCluster(3);
 
-    // Need to use a new 'Configuration' so we make a new HConnection.
-    // Otherwise we're reusing an HConnection that has gone stale because
+    // Need to use a new 'Configuration' so we make a new Connection.
+    // Otherwise we're reusing a Connection that has gone stale because
     // the shutdown of the cluster also called shut of the connection.
     allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
     assertEquals(4, allRegions.size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 76b4134..bd5c91e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -195,11 +195,11 @@ public class TestHRegionServerBulkLoad {
         Path hfile = new Path(dir, family(i));
         byte[] fam = Bytes.toBytes(family(i));
         createHFile(fs, hfile, fam, QUAL, val, 1000);
-        famPaths.add(new Pair<byte[], String>(fam, hfile.toString()));
+        famPaths.add(new Pair<>(fam, hfile.toString()));
       }
 
       // bulk load HFiles
-      final HConnection conn = UTIL.getHBaseAdmin().getConnection();
+      final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection();
       RegionServerCallable<Void> callable =
           new RegionServerCallable<Void>(conn, tableName, Bytes.toBytes("aaa")) {
         @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 104753a..538b8ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -25,9 +25,9 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
@@ -51,7 +51,7 @@ public class TestReplicationSinkManager {
   public void setUp() {
     replicationPeers = mock(ReplicationPeers.class);
     replicationEndpoint = mock(HBaseReplicationEndpoint.class);
-    sinkManager = new ReplicationSinkManager(mock(HConnection.class),
+    sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
                       PEER_CLUSTER_ID, replicationEndpoint, new Configuration());
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index 1dfb526..da51516 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -138,7 +137,7 @@ public class BaseTestHBaseFsck {
   protected void undeployRegion(Connection conn, ServerName sn,
       HRegionInfo hri) throws IOException, InterruptedException {
     try {
-      HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) conn, sn, hri);
+      HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
       if (!hri.isMetaTable()) {
         admin.offline(hri.getRegionName());
       }
@@ -344,11 +343,11 @@ public class BaseTestHBaseFsck {
     Map<ServerName, List<String>> mm =
         new HashMap<ServerName, List<String>>();
     for (ServerName hsi : regionServers) {
-      AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
+      AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
 
       // list all online regions from this region server
       List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
-      List<String> regionNames = new ArrayList<String>();
+      List<String> regionNames = new ArrayList<>();
       for (HRegionInfo hri : regions) {
         regionNames.add(hri.getRegionNameAsString());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index aaa92ff..04c22b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.util;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT;
 import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.MUTATE_INFO;
 
+import com.google.common.base.Preconditions;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -39,15 +41,12 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Common base class for reader and writer parts of multi-thread HBase load
  * test ({@link LoadTestTool}).
@@ -57,7 +56,7 @@ public abstract class MultiThreadedAction {
 
   protected final TableName tableName;
   protected final Configuration conf;
-  protected final HConnection connection; // all reader / writer threads will share this
connection
+  protected final ClusterConnection connection; // all reader / writer threads will share
this connection
 
   protected int numThreads = 1;
 
@@ -152,7 +151,7 @@ public abstract class MultiThreadedAction {
     this.dataGenerator = dataGen;
     this.tableName = tableName;
     this.actionLetter = actionLetter;
-    this.connection = (HConnection) ConnectionFactory.createConnection(conf);
+    this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
   }
 
   public void start(long startKey, long endKey, int numThreads) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index ca06e97..23b999e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Get;
 
 import org.apache.hadoop.hbase.client.Consistency;
@@ -158,7 +159,7 @@ public class MultiThreadedReader extends MultiThreadedAction
       setName(getClass().getSimpleName() + "_" + readerId);
     }
 
-    protected HTableInterface createTable() throws IOException {
+    protected Table createTable() throws IOException {
       return connection.getTable(tableName);
     }
 
@@ -379,7 +380,7 @@ public class MultiThreadedReader extends MultiThreadedAction
           numKeysVerified.incrementAndGet();
         }
       } else {
-        HRegionLocation hloc = connection.getRegionLocation(tableName,
+        HRegionLocation hloc = ((ClusterConnection) connection).getRegionLocation(tableName,
           get.getRow(), false);
         String rowKey = Bytes.toString(get.getRow());
         LOG.info("Key = " + rowKey + ", Region location: " + hloc);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index e28acc6..0f3baf9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -130,7 +130,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
       table = createTable();
     }
 
-    protected HTableInterface createTable() throws IOException {
+    protected Table createTable() throws IOException {
       return connection.getTable(tableName);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
index 83e207a..32a06bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
@@ -87,7 +87,7 @@ public class MultiThreadedWriter extends MultiThreadedWriterBase {
       table = createTable();
     }
 
-    protected HTableInterface createTable() throws IOException {
+    protected Table createTable() throws IOException {
       return connection.getTable(tableName);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java
index 99b82f2..88df7f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java
@@ -31,7 +31,7 @@ public class TestConnectionCache extends TestCase {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
   /**
-   * test for ConnectionCache cleaning expired HConnection
+   * test for ConnectionCache cleaning expired Connections
    */
   @Test
   public void testConnectionChore() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
index 6d0e48c..7f023e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
@@ -19,8 +19,13 @@
 
 package org.apache.hadoop.hbase.util;
 
-
 import com.google.common.collect.Multimap;
+
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -33,7 +38,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -52,11 +56,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
 import static org.junit.Assert.*;
 
@@ -350,7 +349,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
             }
           }
 
-          HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) connection,
+          HBaseFsckRepair.closeRegionSilentlyAndWait(connection,
               cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
           admin.offline(regionName);
           break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index 20866e2..a9b38ba 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -56,7 +56,7 @@ import scala.collection.mutable
   *
   * HBaseContext will take the responsibilities
   * of disseminating the configuration information
-  * to the working and managing the life cycle of HConnections.
+  * to the workers and managing the life cycle of Connections.
  */
 class HBaseContext(@transient sc: SparkContext,
                    @transient val config: Configuration,
@@ -88,14 +88,14 @@ class HBaseContext(@transient sc: SparkContext,
   /**
    * A simple enrichment of the traditional Spark RDD foreachPartition.
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * @param rdd  Original RDD with data to iterate over
    * @param f    Function to be given a iterator to iterate through
-   *             the RDD values and a HConnection object to interact
+   *             the RDD values and a Connection object to interact
    *             with HBase
    */
   def foreachPartition[T](rdd: RDD[T],
@@ -107,14 +107,14 @@ class HBaseContext(@transient sc: SparkContext,
   /**
    * A simple enrichment of the traditional Spark Streaming dStream foreach
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * @param dstream  Original DStream with data to iterate over
    * @param f        Function to be given a iterator to iterate through
-   *                 the DStream values and a HConnection object to
+   *                 the DStream values and a Connection object to
    *                 interact with HBase
    */
   def foreachPartition[T](dstream: DStream[T],
@@ -127,14 +127,14 @@ class HBaseContext(@transient sc: SparkContext,
   /**
    * A simple enrichment of the traditional Spark RDD mapPartition.
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * @param rdd  Original RDD with data to iterate over
    * @param mp   Function to be given a iterator to iterate through
-   *             the RDD values and a HConnection object to interact
+   *             the RDD values and a Connection object to interact
    *             with HBase
    * @return     Returns a new RDD generated by the user definition
    *             function just like normal mapPartition
@@ -153,9 +153,9 @@ class HBaseContext(@transient sc: SparkContext,
    * foreachPartition.
    *
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * Note: Make sure to partition correctly to avoid memory issue when
@@ -163,7 +163,7 @@ class HBaseContext(@transient sc: SparkContext,
    *
    * @param dstream  Original DStream with data to iterate over
    * @param f       Function to be given a iterator to iterate through
-   *                 the DStream values and a HConnection object to
+   *                 the DStream values and a Connection object to
    *                 interact with HBase
    * @return         Returns a new DStream generated by the user
    *                 definition function just like normal mapPartition
@@ -179,9 +179,9 @@ class HBaseContext(@transient sc: SparkContext,
    * mapPartition.
    *
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * Note: Make sure to partition correctly to avoid memory issue when
@@ -189,7 +189,7 @@ class HBaseContext(@transient sc: SparkContext,
    *
    * @param dstream  Original DStream with data to iterate over
    * @param f       Function to be given a iterator to iterate through
-   *                 the DStream values and a HConnection object to
+   *                 the DStream values and a Connection object to
    *                 interact with HBase
    * @return         Returns a new DStream generated by the user
    *                 definition function just like normal mapPartition
@@ -208,7 +208,7 @@ class HBaseContext(@transient sc: SparkContext,
    *
    * It allow addition support for a user to take RDD
    * and generate puts and send them to HBase.
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param rdd       Original RDD with data to iterate over
@@ -253,7 +253,7 @@ class HBaseContext(@transient sc: SparkContext,
    * It allow addition support for a user to take a DStream and
    * generate puts and send them to HBase.
    *
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param dstream    Original DStream with data to iterate over
@@ -274,7 +274,7 @@ class HBaseContext(@transient sc: SparkContext,
    * A simple abstraction over the HBaseContext.foreachPartition method.
    *
    * It allow addition support for a user to take a RDD and generate delete
-   * and send them to HBase.  The complexity of managing the HConnection is
+   * and send them to HBase.  The complexity of managing the Connection is
    * removed from the developer
    *
    * @param rdd       Original RDD with data to iterate over
@@ -294,7 +294,7 @@ class HBaseContext(@transient sc: SparkContext,
    * It allow addition support for a user to take a DStream and
    * generate Delete and send them to HBase.
    *
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param dstream    Original DStream with data to iterate over

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd532da/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
index d8fdb23..7deb5b8 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
@@ -43,14 +43,14 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
   /**
    * A simple enrichment of the traditional Spark javaRdd foreachPartition.
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * @param javaRdd Original javaRdd with data to iterate over
    * @param f       Function to be given a iterator to iterate through
-   *                the RDD values and a HConnection object to interact
+   *                the RDD values and a Connection object to interact
    *                with HBase
    */
   def foreachPartition[T](javaRdd: JavaRDD[T],
@@ -65,14 +65,14 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
   /**
    * A simple enrichment of the traditional Spark Streaming dStream foreach
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * @param javaDstream Original DStream with data to iterate over
    * @param f           Function to be given a iterator to iterate through
-   *                    the JavaDStream values and a HConnection object to
+   *                    the JavaDStream values and a Connection object to
    *                    interact with HBase
    */
   def foreachPartition[T](javaDstream: JavaDStream[T],
@@ -84,9 +84,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
   /**
    * A simple enrichment of the traditional Spark JavaRDD mapPartition.
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * Note: Make sure to partition correctly to avoid memory issue when
@@ -94,7 +94,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    *
    * @param javaRdd Original JavaRdd with data to iterate over
    * @param f       Function to be given a iterator to iterate through
-   *                the RDD values and a HConnection object to interact
+   *                the RDD values and a Connection object to interact
    *                with HBase
    * @return        Returns a new RDD generated by the user definition
    *                function just like normal mapPartition
@@ -118,9 +118,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    * mapPartition.
    *
    * This function differs from the original in that it offers the
-   * developer access to a already connected HConnection object
+   * developer access to an already connected Connection object
    *
-   * Note: Do not close the HConnection object.  All HConnection
+   * Note: Do not close the Connection object.  All Connection
    * management is handled outside this method
    *
    * Note: Make sure to partition correctly to avoid memory issue when
@@ -128,7 +128,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    *
    * @param javaDstream Original JavaDStream with data to iterate over
    * @param mp          Function to be given a iterator to iterate through
-   *                    the JavaDStream values and a HConnection object to
+   *                    the JavaDStream values and a Connection object to
    *                    interact with HBase
    * @return            Returns a new JavaDStream generated by the user
    *                    definition function just like normal mapPartition
@@ -146,7 +146,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    *
    * It allow addition support for a user to take JavaRDD
    * and generate puts and send them to HBase.
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param javaRdd   Original JavaRDD with data to iterate over
@@ -167,7 +167,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    * It allow addition support for a user to take a JavaDStream and
    * generate puts and send them to HBase.
    *
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param javaDstream Original DStream with data to iterate over
@@ -189,7 +189,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    * It allow addition support for a user to take a JavaRDD and
    * generate delete and send them to HBase.
    *
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param javaRdd   Original JavaRDD with data to iterate over
@@ -209,7 +209,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
    * It allow addition support for a user to take a JavaDStream and
    * generate Delete and send them to HBase.
    *
-   * The complexity of managing the HConnection is
+   * The complexity of managing the Connection is
    * removed from the developer
    *
    * @param javaDStream Original DStream with data to iterate over


Mime
View raw message