hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-12422 Use ConnectionFactory in HTable constructors Fix method signature in test
Date Mon, 15 Dec 2014 03:58:45 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4f22143df -> b1c95d50c


HBASE-12422 Use ConnectionFactory in HTable constructors Fix method signature in test

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1c95d50
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1c95d50
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1c95d50

Branch: refs/heads/branch-1
Commit: b1c95d50cf2f2de260595e7c076565e492ff9330
Parents: 4f22143
Author: stack <stack@apache.org>
Authored: Sat Dec 13 21:38:34 2014 -0800
Committer: stack <stack@apache.org>
Committed: Sun Dec 14 19:56:20 2014 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/HTable.java  |   4 +-
 .../coprocessor/SecureBulkLoadClient.java       |   8 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   9 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  21 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java | 209 ++++++++++---------
 ...ecureLoadIncrementalHFilesSplitRecovery.java |   7 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |  83 ++++----
 7 files changed, 177 insertions(+), 164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index f765c2d..146cf80 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -180,7 +180,7 @@ public class HTable implements HTableInterface, RegionLocator {
       this.connection = null;
       return;
     }
-    this.connection = ConnectionManager.getConnectionInternal(conf);
+    this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
     this.configuration = conf;
 
     this.pool = getDefaultExecutor(conf);
@@ -252,7 +252,7 @@ public class HTable implements HTableInterface, RegionLocator {
   @Deprecated
   public HTable(Configuration conf, final TableName tableName, final ExecutorService pool)
       throws IOException {
-    this.connection = ConnectionManager.getConnectionInternal(conf);
+    this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
     this.configuration = conf;
     this.pool = pool;
     if (pool == null) {

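For context, the replacement pattern is the public ConnectionFactory API: create a Connection once, obtain lightweight Table handles from it, and close both when done. A minimal standalone sketch, not part of the patch (the table name "t1" and the default configuration are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectionFactoryExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Create the Connection explicitly and get Tables from it, instead of
    // going through the deprecated HTable(Configuration, ...) constructors.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      // use the table here
    } // both table and connection are closed here, even on exceptions
  }
}
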
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java
index 5ae4bed..e2e87c2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.hbase.client.coprocessor;
 
-import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static org.apache.hadoop.hbase.HConstants.LAST_ROW;
-
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.ByteStringer;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
@@ -77,7 +77,7 @@ public class SecureBulkLoadClient {
       if (controller.failedOnException()) {
         throw controller.getFailedOn();
       }
-      
+
       return response.getBulkToken();
     } catch (Throwable throwable) {
       throw new IOException(throwable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 55374b2..26ef8ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -646,10 +646,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
           if (!isSecureBulkLoadEndpointAvailable()) {
             success = ProtobufUtil.bulkLoadHFile(getStub(), famPaths, regionName, assignSeqIds);
           } else {
-            Table table = new HTable(conn.getConfiguration(), getTableName());
-            secureClient = new SecureBulkLoadClient(table);
-            success = secureClient.bulkLoadHFiles(famPaths, fsDelegationToken.getUserToken(),
-              bulkToken, getLocation().getRegionInfo().getStartKey());
+            try (Table table = conn.getTable(getTableName())) {
+              secureClient = new SecureBulkLoadClient(table);
+              success = secureClient.bulkLoadHFiles(famPaths, fsDelegationToken.getUserToken(),
+                bulkToken, getLocation().getRegionInfo().getStartKey());
+            }
           }
           return success;
         } finally {

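The key change above is that the Table backing the SecureBulkLoadClient now comes from the existing Connection and is scoped with try-with-resources, so it is closed even when the bulk load throws. The same idiom in a self-contained form (class and method names are illustrative, not from the patch):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class ScopedTableSketch {
  // The Table is borrowed from the caller's Connection and lives only for
  // the duration of the try block.
  static Result readRow(Connection connection, TableName tableName, byte[] row)
      throws IOException {
    try (Table table = connection.getTable(tableName)) {
      return table.get(new Get(row));
    } // table is closed here even if get() throws
  }
}
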
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 2a00510..7d50113 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -79,19 +79,22 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
@@ -104,7 +107,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Block
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -112,10 +115,10 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -195,8 +198,8 @@ public class HBaseFsck extends Configured {
    **********************/
   private static final Log LOG = LogFactory.getLog(HBaseFsck.class.getName());
   private ClusterStatus status;
-  private HConnection connection;
-  private HBaseAdmin admin;
+  private ClusterConnection connection;
+  private Admin admin;
   private Table meta;
   // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions
   protected ExecutorService executor;
@@ -387,9 +390,9 @@ public class HBaseFsck extends Configured {
     });
     LOG.debug("Launching hbck");
 
-    connection = HConnectionManager.createConnection(getConf());
-    admin = new HBaseAdmin(connection);
-    meta = new HTable(TableName.META_TABLE_NAME, connection);
+    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
+    admin = connection.getAdmin();
+    meta = connection.getTable(TableName.META_TABLE_NAME);
     status = admin.getClusterStatus();
   }
 

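The connect() rewrite above swaps direct HBaseAdmin/HTable construction for handles obtained from a single ClusterConnection. A minimal sketch of that setup plus a matching teardown (the class name and disconnect() are illustrative; the patch itself only shows connect()):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class FsckConnectSketch {
  private ClusterConnection connection;
  private Admin admin;
  private Table meta;
  private ClusterStatus status;

  void connect(Configuration conf) throws IOException {
    // ConnectionFactory hands back a plain Connection; hbck casts to
    // ClusterConnection because it needs cluster-internal client APIs.
    connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
    admin = connection.getAdmin();
    meta = connection.getTable(TableName.META_TABLE_NAME);
    status = admin.getClusterStatus();
  }

  void disconnect() throws IOException {
    // Close in reverse order of acquisition.
    if (meta != null) meta.close();
    if (admin != null) admin.close();
    if (connection != null) connection.close();
  }
}
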
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 4f181f8..e7ee0ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -35,17 +35,17 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -121,15 +122,17 @@ public class TestLoadIncrementalHFilesSplitRecovery {
    * Creates a table with given table name and specified number of column
    * families if the table does not already exist.
    */
-  private void setupTable(TableName table, int cfs) throws IOException {
+  private void setupTable(final Connection connection, TableName table, int cfs)
+  throws IOException {
     try {
       LOG.info("Creating table " + table);
       HTableDescriptor htd = new HTableDescriptor(table);
       for (int i = 0; i < cfs; i++) {
         htd.addFamily(new HColumnDescriptor(family(i)));
       }
-
-      util.getHBaseAdmin().createTable(htd);
+      try (Admin admin = connection.getAdmin()) {
+        admin.createTable(htd);
+      }
     } catch (TableExistsException tee) {
       LOG.info("Table " + table + " already exists");
     }
@@ -168,12 +171,14 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   /**
    * Populate table with known values.
    */
-  private void populateTable(TableName table, int value) throws Exception {
+  private void populateTable(final Connection connection, TableName table, int value)
+  throws Exception {
     // create HFiles for different column families
     LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
     Path bulk1 = buildBulkFiles(table, value);
-    HTable t = new HTable(util.getConfiguration(), table);
-    lih.doBulkLoad(bulk1, t);
+    try (Table t = connection.getTable(table)) {
+      lih.doBulkLoad(bulk1, (HTable)t);
+    }
   }
 
   /**
@@ -263,20 +268,18 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   @Test(expected=IOException.class, timeout=120000)
   public void testBulkLoadPhaseFailure() throws Exception {
     TableName table = TableName.valueOf("bulkLoadPhaseFailure");
-    setupTable(table, 10);
-
     final AtomicInteger attmptedCalls = new AtomicInteger();
     final AtomicInteger failedCalls = new AtomicInteger();
     util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
-    try {
+    try (Connection connection = ConnectionFactory.createConnection(this.util.getConfiguration())) {
+      setupTable(connection, table, 10);
       LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
-
         protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
             TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis)
-            throws IOException {
+                throws IOException {
           int i = attmptedCalls.incrementAndGet();
           if (i == 1) {
-            HConnection errConn = null;
+            Connection errConn = null;
             try {
               errConn = getMockedConnection(util.getConfiguration());
             } catch (Exception e) {
@@ -284,23 +287,24 @@ public class TestLoadIncrementalHFilesSplitRecovery {
               throw new RuntimeException("mocking cruft, should never happen");
             }
             failedCalls.incrementAndGet();
-            return super.tryAtomicRegionLoad(errConn, tableName, first, lqis);
+            return super.tryAtomicRegionLoad((HConnection)errConn, tableName, first, lqis);
           }
 
-          return super.tryAtomicRegionLoad(conn, tableName, first, lqis);
+          return super.tryAtomicRegionLoad((HConnection)conn, tableName, first, lqis);
         }
       };
-
-      // create HFiles for different column families
-      Path dir = buildBulkFiles(table, 1);
-      HTable t = new HTable(util.getConfiguration(), table);
-      lih.doBulkLoad(dir, t);
-    } finally {
-      util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+      try {
+        // create HFiles for different column families
+        Path dir = buildBulkFiles(table, 1);
+        try (Table t = connection.getTable(table)) {
+          lih.doBulkLoad(dir, (HTable)t);
+        }
+      } finally {
+        util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+      }
+      fail("doBulkLoad should have thrown an exception");
     }
-
-    fail("doBulkLoad should have thrown an exception");
   }
 
   @SuppressWarnings("deprecation")
@@ -335,39 +339,39 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   @Test (timeout=120000)
   public void testSplitWhileBulkLoadPhase() throws Exception {
     final TableName table = TableName.valueOf("splitWhileBulkloadPhase");
-    setupTable(table, 10);
-    populateTable(table,1);
-    assertExpectedTable(table, ROWCOUNT, 1);
-
-    // Now let's cause trouble.  This will occur after checks and cause bulk
-    // files to fail when attempt to atomically import.  This is recoverable.
-    final AtomicInteger attemptedCalls = new AtomicInteger();
-    LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(
-        util.getConfiguration()) {
-
-      protected void bulkLoadPhase(final Table htable, final HConnection conn,
-          ExecutorService pool, Deque<LoadQueueItem> queue,
-          final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
-        int i = attemptedCalls.incrementAndGet();
-        if (i == 1) {
-          // On first attempt force a split.
-          forceSplit(table);
+    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
+      setupTable(connection, table, 10);
+      populateTable(connection, table,1);
+      assertExpectedTable(table, ROWCOUNT, 1);
+
+      // Now let's cause trouble.  This will occur after checks and cause bulk
+      // files to fail when attempt to atomically import.  This is recoverable.
+      final AtomicInteger attemptedCalls = new AtomicInteger();
+      LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
+        protected void bulkLoadPhase(final Table htable, final HConnection conn,
+            ExecutorService pool, Deque<LoadQueueItem> queue,
+            final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
+          int i = attemptedCalls.incrementAndGet();
+          if (i == 1) {
+            // On first attempt force a split.
+            forceSplit(table);
+          }
+          super.bulkLoadPhase(htable, conn, pool, queue, regionGroups);
         }
+      };
 
-        super.bulkLoadPhase(htable, conn, pool, queue, regionGroups);
+      // create HFiles for different column families
+      try (Table t = connection.getTable(table)) {
+        Path bulk = buildBulkFiles(table, 2);
+        lih2.doBulkLoad(bulk, (HTable)t);
       }
-    };
 
-    // create HFiles for different column families
-    HTable t = new HTable(util.getConfiguration(), table);
-    Path bulk = buildBulkFiles(table, 2);
-    lih2.doBulkLoad(bulk, t);
-
-    // check that data was loaded
-    // The three expected attempts are 1) failure because need to split, 2)
-    // load of split top 3) load of split bottom
-    assertEquals(attemptedCalls.get(), 3);
-    assertExpectedTable(table, ROWCOUNT, 2);
+      // check that data was loaded
+      // The three expected attempts are 1) failure because need to split, 2)
+      // load of split top 3) load of split bottom
+      assertEquals(attemptedCalls.get(), 3);
+      assertExpectedTable(table, ROWCOUNT, 2);
+    }
   }
 
   /**
@@ -377,33 +381,35 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   @Test (timeout=120000)
   public void testGroupOrSplitPresplit() throws Exception {
     final TableName table = TableName.valueOf("groupOrSplitPresplit");
-    setupTable(table, 10);
-    populateTable(table, 1);
-    assertExpectedTable(table, ROWCOUNT, 1);
-    forceSplit(table);
-
-    final AtomicInteger countedLqis= new AtomicInteger();
-    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-        util.getConfiguration()) {
-      protected List<LoadQueueItem> groupOrSplit(
-          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-          final LoadQueueItem item, final HTable htable,
-          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-        List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
-        if (lqis != null) {
-          countedLqis.addAndGet(lqis.size());
+    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
+      setupTable(connection, table, 10);
+      populateTable(connection, table, 1);
+      assertExpectedTable(connection, table, ROWCOUNT, 1);
+      forceSplit(table);
+
+      final AtomicInteger countedLqis= new AtomicInteger();
+      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
+          util.getConfiguration()) {
+        protected List<LoadQueueItem> groupOrSplit(
+            Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+            final LoadQueueItem item, final HTable htable,
+            final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
+          List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
+          if (lqis != null) {
+            countedLqis.addAndGet(lqis.size());
+          }
+          return lqis;
         }
-        return lqis;
-      }
-    };
-
-    // create HFiles for different column families
-    Path bulk = buildBulkFiles(table, 2);
-    HTable ht = new HTable(util.getConfiguration(), table);
-    lih.doBulkLoad(bulk, ht);
+      };
 
-    assertExpectedTable(table, ROWCOUNT, 2);
-    assertEquals(20, countedLqis.get());
+      // create HFiles for different column families
+      Path bulk = buildBulkFiles(table, 2);
+      try (Table t = connection.getTable(table)) {
+        lih.doBulkLoad(bulk, (HTable)t);
+      }
+      assertExpectedTable(connection, table, ROWCOUNT, 2);
+      assertEquals(20, countedLqis.get());
+    }
   }
 
   /**
@@ -413,29 +419,32 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   @Test(expected = IOException.class, timeout=120000)
   public void testGroupOrSplitFailure() throws Exception {
     TableName table = TableName.valueOf("groupOrSplitFailure");
-    setupTable(table, 10);
+    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
+      setupTable(connection, table, 10);
 
-    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
-        util.getConfiguration()) {
-      int i = 0;
+      LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
+          util.getConfiguration()) {
+        int i = 0;
 
-      protected List<LoadQueueItem> groupOrSplit(
-          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
-          final LoadQueueItem item, final HTable table,
-          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
-        i++;
+        protected List<LoadQueueItem> groupOrSplit(
+            Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+            final LoadQueueItem item, final HTable table,
+            final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
+          i++;
 
-        if (i == 5) {
-          throw new IOException("failure");
+          if (i == 5) {
+            throw new IOException("failure");
+          }
+          return super.groupOrSplit(regionGroups, item, table, startEndKeys);
         }
-        return super.groupOrSplit(regionGroups, item, table, startEndKeys);
-      }
-    };
+      };
 
-    // create HFiles for different column families
-    Path dir = buildBulkFiles(table,1);
-    HTable t = new HTable(util.getConfiguration(), table);
-    lih.doBulkLoad(dir, t);
+      // create HFiles for different column families
+      Path dir = buildBulkFiles(table,1);
+      try (Table t = connection.getTable(table)) {
+        lih.doBulkLoad(dir, (HTable)t);
+      }
+    }
 
     fail("doBulkLoad should have thrown an exception");
   }

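The test refactors above thread one shared Connection through the helpers instead of letting each helper build its own HTable, and scope short-lived Admin handles with try-with-resources. A condensed, self-contained sketch of the new setupTable() shape (class name and family naming are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class SetupTableSketch {
  // Create a table with the given number of column families, reusing the
  // caller's Connection; the short-lived Admin is scoped to the try block.
  static void setupTable(Connection connection, TableName table, int cfs) throws IOException {
    try {
      HTableDescriptor htd = new HTableDescriptor(table);
      for (int i = 0; i < cfs; i++) {
        htd.addFamily(new HColumnDescriptor("family_" + i));
      }
      try (Admin admin = connection.getAdmin()) {
        admin.createTable(htd);
      }
    } catch (TableExistsException tee) {
      // table already exists; nothing to do
    }
  }
}
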
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
index 5e6c6c3..ea13845 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java
@@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category;
 /**
  * Reruns TestSecureLoadIncrementalHFilesSplitRecovery
  * using LoadIncrementalHFiles in secure mode.
- * This suite is unable to verify the security handoff/turnover
+ * This suite is unable to verify the security handoff/turnove
  * as miniCluster is running as system user thus has root privileges
  * and delegation tokens don't seem to work on miniDFS.
  *
@@ -61,9 +61,8 @@ public class TestSecureLoadIncrementalHFilesSplitRecovery extends TestLoadIncrem
   }
 
   //Disabling this test as it does not work in secure mode
-  @Test
+  @Test (timeout=180000)
   @Override
   public void testBulkLoadPhaseFailure() {
   }
-}
-
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1c95d50/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 817a309..b61a399 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -156,7 +156,7 @@ public class TestHBaseFsck {
     TEST_UTIL.shutdownMiniCluster();
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testHBaseFsck() throws Exception {
     assertNoErrors(doFsck(conf, false));
     TableName table = TableName.valueOf("tableBadMetaAssign");
@@ -341,9 +341,10 @@ public class TestHBaseFsck {
         }
 
         if (metaRow) {
-          Table meta = new HTable(conf, TableName.META_TABLE_NAME, executorService);
-          Delete delete = new Delete(deleteRow);
-          meta.delete(delete);
+          try (Table meta = conn.getTable(TableName.META_TABLE_NAME, executorService)) {
+            Delete delete = new Delete(deleteRow);
+            meta.delete(delete);
+          }
         }
       }
       LOG.info(hri.toString() + hsa.toString());
@@ -440,7 +441,7 @@ public class TestHBaseFsck {
   /**
    * This creates a clean table and confirms that the table is clean.
    */
-  @Test
+  @Test (timeout=180000)
   public void testHBaseFsckClean() throws Exception {
     assertNoErrors(doFsck(conf, false));
     TableName table = TableName.valueOf("tableClean");
@@ -464,7 +465,7 @@ public class TestHBaseFsck {
   /**
    * Test thread pooling in the case where there are more regions than threads
    */
-  @Test
+  @Test (timeout=180000)
   public void testHbckThreadpooling() throws Exception {
     TableName table =
         TableName.valueOf("tableDupeStartKey");
@@ -483,7 +484,7 @@ public class TestHBaseFsck {
     }
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testHbckFixOrphanTable() throws Exception {
     TableName table = TableName.valueOf("tableInfo");
     FileSystem fs = null;
@@ -537,7 +538,7 @@ public class TestHBaseFsck {
    *
    * @throws Exception
    */
-  @Test
+  @Test (timeout=180000)
   public void testParallelHbck() throws Exception {
     final ExecutorService service;
     final Future<HBaseFsck> hbck1,hbck2;
@@ -580,7 +581,7 @@ public class TestHBaseFsck {
    * This create and fixes a bad table with regions that have a duplicate
    * start key
    */
-  @Test
+  @Test (timeout=180000)
   public void testDupeStartKey() throws Exception {
     TableName table =
         TableName.valueOf("tableDupeStartKey");
@@ -621,7 +622,7 @@ public class TestHBaseFsck {
    * This creates a table with region_replica > 1 and verifies hbck runs
    * successfully
    */
-  @Test
+  @Test (timeout=180000)
   public void testHbckWithRegionReplica() throws Exception {
     TableName table =
         TableName.valueOf("tableWithReplica");
@@ -673,7 +674,7 @@ public class TestHBaseFsck {
    * This create and fixes a bad table with regions that have a duplicate
    * start key
    */
-  @Test
+  @Test (timeout=180000)
   public void testDupeRegion() throws Exception {
     TableName table =
         TableName.valueOf("tableDupeRegion");
@@ -726,7 +727,7 @@ public class TestHBaseFsck {
   /**
    * This creates and fixes a bad table with regions that has startkey == endkey
    */
-  @Test
+  @Test (timeout=180000)
   public void testDegenerateRegions() throws Exception {
     TableName table = TableName.valueOf("tableDegenerateRegions");
     try {
@@ -766,7 +767,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table where a region is completely contained
    * by another region.
    */
-  @Test
+  @Test (timeout=180000)
   public void testContainedRegionOverlap() throws Exception {
     TableName table =
         TableName.valueOf("tableContainedRegionOverlap");
@@ -808,7 +809,7 @@ public class TestHBaseFsck {
    * region. Mess around the meta data so that closeRegion/offlineRegion
    * throws exceptions.
    */
-  @Test
+  @Test (timeout=180000)
   public void testSidelineOverlapRegion() throws Exception {
     TableName table =
         TableName.valueOf("testSidelineOverlapRegion");
@@ -899,7 +900,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table where a region is completely contained
    * by another region, and there is a hole (sort of like a bad split)
    */
-  @Test
+  @Test (timeout=180000)
   public void testOverlapAndOrphan() throws Exception {
     TableName table =
         TableName.valueOf("tableOverlapAndOrphan");
@@ -944,7 +945,7 @@ public class TestHBaseFsck {
    * a start key contained in another region and its end key is contained in
    * yet another region.
    */
-  @Test
+  @Test (timeout=180000)
   public void testCoveredStartKey() throws Exception {
     TableName table =
         TableName.valueOf("tableCoveredStartKey");
@@ -985,7 +986,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table with a missing region -- hole in meta
    * and data missing in the fs.
    */
-  @Test
+  @Test (timeout=180000)
   public void testRegionHole() throws Exception {
     TableName table =
         TableName.valueOf("tableRegionHole");
@@ -1020,7 +1021,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table with a missing region -- hole in meta
    * and data present but .regioinfino missing (an orphan hdfs region)in the fs.
    */
-  @Test
+  @Test (timeout=180000)
   public void testHDFSRegioninfoMissing() throws Exception {
     TableName table =
         TableName.valueOf("tableHDFSRegioininfoMissing");
@@ -1057,7 +1058,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table with a region that is missing meta and
    * not assigned to a region server.
    */
-  @Test
+  @Test (timeout=180000)
   public void testNotInMetaOrDeployedHole() throws Exception {
     TableName table =
         TableName.valueOf("tableNotInMetaOrDeployedHole");
@@ -1092,7 +1093,7 @@ public class TestHBaseFsck {
   /**
    * This creates fixes a bad table with a hole in meta.
    */
-  @Test
+  @Test (timeout=180000)
   public void testNotInMetaHole() throws Exception {
     TableName table =
         TableName.valueOf("tableNotInMetaHole");
@@ -1128,7 +1129,7 @@ public class TestHBaseFsck {
    * This creates and fixes a bad table with a region that is in meta but has
    * no deployment or data hdfs
    */
-  @Test
+  @Test (timeout=180000)
   public void testNotInHdfs() throws Exception {
     TableName table =
         TableName.valueOf("tableNotInHdfs");
@@ -1163,7 +1164,7 @@ public class TestHBaseFsck {
    * This creates entries in hbase:meta with no hdfs data.  This should cleanly
    * remove the table.
    */
-  @Test
+  @Test (timeout=180000)
   public void testNoHdfsTable() throws Exception {
     TableName table = TableName.valueOf("NoHdfsTable");
     setupTable(table);
@@ -1213,7 +1214,7 @@ public class TestHBaseFsck {
   /**
    * when the hbase.version file missing, It is fix the fault.
    */
-  @Test
+  @Test (timeout=180000)
   public void testNoVersionFile() throws Exception {
     // delete the hbase.version file
     Path rootDir = FSUtils.getRootDir(conf);
@@ -1234,7 +1235,7 @@ public class TestHBaseFsck {
   /**
    * The region is not deployed when the table is disabled.
    */
-  @Test
+  @Test (timeout=180000)
   public void testRegionShouldNotBeDeployed() throws Exception {
     TableName table =
         TableName.valueOf("tableRegionShouldNotBeDeployed");
@@ -1295,7 +1296,7 @@ public class TestHBaseFsck {
   /**
    * This creates two tables and mess both of them and fix them one by one
    */
-  @Test
+  @Test (timeout=180000)
   public void testFixByTable() throws Exception {
     TableName table1 =
         TableName.valueOf("testFixByTable1");
@@ -1341,7 +1342,7 @@ public class TestHBaseFsck {
   /**
    * A split parent in meta, in hdfs, and not deployed
    */
-  @Test
+  @Test (timeout=180000)
   public void testLingeringSplitParent() throws Exception {
     TableName table =
         TableName.valueOf("testLingeringSplitParent");
@@ -1421,7 +1422,7 @@ public class TestHBaseFsck {
    * Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
    * valid cases where the daughters are there.
    */
-  @Test
+  @Test (timeout=180000)
   public void testValidLingeringSplitParent() throws Exception {
     TableName table =
         TableName.valueOf("testLingeringSplitParent");
@@ -1540,8 +1541,7 @@ public class TestHBaseFsck {
    */
   @Test(timeout=120000)
   public void testMissingFirstRegion() throws Exception {
-    TableName table =
-        TableName.valueOf("testMissingFirstRegion");
+    TableName table = TableName.valueOf("testMissingFirstRegion");
     try {
       setupTable(table);
       assertEquals(ROWKEYS.length, countRows());
@@ -1623,7 +1623,7 @@ public class TestHBaseFsck {
   /**
    * Test -noHdfsChecking option can detect and fix assignments issue.
    */
-  @Test
+  @Test (timeout=180000)
   public void testFixAssignmentsAndNoHdfsChecking() throws Exception {
     TableName table =
         TableName.valueOf("testFixAssignmentsAndNoHdfsChecking");
@@ -1673,7 +1673,7 @@ public class TestHBaseFsck {
    * However, it can not fix it without checking Hdfs because we need to get
    * the region info from Hdfs in this case, then to patch the meta.
    */
-  @Test
+  @Test (timeout=180000)
   public void testFixMetaNotWorkingWithNoHdfsChecking() throws Exception {
     TableName table =
         TableName.valueOf("testFixMetaNotWorkingWithNoHdfsChecking");
@@ -1727,7 +1727,7 @@ public class TestHBaseFsck {
    * Test -fixHdfsHoles doesn't work with -noHdfsChecking option,
    * and -noHdfsChecking can't detect orphan Hdfs region.
    */
-  @Test
+  @Test (timeout=180000)
   public void testFixHdfsHolesNotWorkingWithNoHdfsChecking() throws Exception {
     TableName table =
         TableName.valueOf("testFixHdfsHolesNotWorkingWithNoHdfsChecking");
@@ -1963,7 +1963,8 @@ public class TestHBaseFsck {
     final FileSystem fs = FileSystem.get(conf);
     HBaseFsck hbck = new HBaseFsck(conf, exec) {
       @Override
-      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
+      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
+      throws IOException {
         return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
           AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);
           @Override
@@ -1982,7 +1983,7 @@ public class TestHBaseFsck {
   /**
    * Test fixing lingering reference file.
    */
-  @Test
+  @Test (timeout=180000)
   public void testLingeringReferenceFile() throws Exception {
     TableName table =
         TableName.valueOf("testLingeringReferenceFile");
@@ -2012,7 +2013,7 @@ public class TestHBaseFsck {
   /**
    * Test mission REGIONINFO_QUALIFIER in hbase:meta
    */
-  @Test
+  @Test (timeout=180000)
   public void testMissingRegionInfoQualifier() throws Exception {
     TableName table =
         TableName.valueOf("testMissingRegionInfoQualifier");
@@ -2066,7 +2067,7 @@ public class TestHBaseFsck {
    * Test pluggable error reporter. It can be plugged in
    * from system property or configuration.
    */
-  @Test
+  @Test (timeout=180000)
   public void testErrorReporter() throws Exception {
     try {
       MockErrorReporter.calledCount = 0;
@@ -2230,7 +2231,7 @@ public class TestHBaseFsck {
     writeLock.release(); // release for clean state
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testMetaOffline() throws Exception {
     // check no errors
     HBaseFsck hbck = doFsck(conf, false);
@@ -2284,7 +2285,7 @@ public class TestHBaseFsck {
     }
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testTableWithNoRegions() throws Exception {
     // We might end up with empty regions in a table
     // see also testNoHdfsTable()
@@ -2318,7 +2319,7 @@ public class TestHBaseFsck {
 
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testHbckAfterRegionMerge() throws Exception {
     TableName table = TableName.valueOf("testMergeRegionFilesInHdfs");
     Table meta = null;
@@ -2366,7 +2367,7 @@ public class TestHBaseFsck {
     }
   }
 
-  @Test
+  @Test (timeout=180000)
   public void testRegionBoundariesCheck() throws Exception {
     HBaseFsck hbck = doFsck(conf, false);
     assertNoErrors(hbck); // no errors
@@ -2382,7 +2383,7 @@ public class TestHBaseFsck {
   @org.junit.Rule
   public TestName name = new TestName();
 
-  @Test
+  @Test (timeout=180000)
   public void testReadOnlyProperty() throws Exception {
     HBaseFsck hbck = doFsck(conf, false);
     Assert.assertEquals("shouldIgnorePreCheckPermission", true,


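Most hunks in TestHBaseFsck.java are mechanical: they add a 180-second timeout to each @Test so a wedged test fails quickly instead of hanging the suite. In JUnit 4 the annotation works like this (a trivial standalone sketch, not from the patch):

import static org.junit.Assert.assertTrue;
import org.junit.Test;

public class TimeoutSketch {
  // JUnit 4 fails the test if it runs longer than the given number of
  // milliseconds, so a hung mini-cluster call cannot stall the whole suite.
  @Test (timeout=180000)
  public void testCompletesQuickly() throws Exception {
    assertTrue(true);
  }
}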