From: stack@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Subject: hbase git commit: HBASE-14794 Cleanup TestAtomicOperation
Date: Wed, 11 Nov 2015 08:08:01 +0000 (UTC)

Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 4d6fed398 -> dbf9bc7bf


HBASE-14794 Cleanup TestAtomicOperation

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
 Fix a few missing table closes. (This suite seems to leave loads of threads
 behind when the test is done, but I have not yet figured out how.)

hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
 Fix some missing table closes. We were leaking client resources.

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
 Close up WALs when done.
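The recurring fix in the diff below is Java 7 try-with-resources: each Table
(and the Connection behind it) is scoped to a try block so close() runs even
when an assertion throws mid-test. A minimal sketch of that pattern follows.
It assumes an HBase 1.x client on the classpath and a reachable cluster; the
class name and the "sketchTable"/"f"/"q" identifiers are illustrative only,
not code from this patch:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class TableCloseSketch {
    public static void main(String[] args) throws Exception {
      // Resources declared in the try header are closed in reverse order
      // when the block exits, whether normally or via an exception.
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("sketchTable"))) {
        Put p = new Put(Bytes.toBytes("row1"));
        p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")); // 1.x-era Put#add
        table.put(p);
      } // table.close() then conn.close() run here
    }
  }

TestAtomicOperation applies the same discipline to resources no try block
owns: its teardown now explicitly closes the region's WAL and shuts down the
block cache after closing the region, rather than leaving them for later
tests to trip over.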
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dbf9bc7b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dbf9bc7b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dbf9bc7b

Branch: refs/heads/branch-1.2
Commit: dbf9bc7bf1fdfe34a53aacc7aa863c130c9d8f41
Parents: 4d6fed3
Author: stack <stack@apache.org>
Authored: Tue Nov 10 18:57:04 2015 -1000
Committer: stack <stack@apache.org>
Committed: Tue Nov 10 22:07:56 2015 -1000

----------------------------------------------------------------------
 .../hbase/client/TestMetaWithReplicas.java      | 137 +++----
 .../hbase/mapreduce/TestImportExport.java       | 376 ++++++++++---------
 .../hbase/regionserver/TestAtomicOperation.java |  58 +--
 3 files changed, 296 insertions(+), 275 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dbf9bc7b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 493013c..d0f6a97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -174,64 +174,68 @@ public class TestMetaWithReplicas {
       util.getHBaseAdmin().disableTable(TABLE);
       util.getHBaseAdmin().deleteTable(TABLE);
     }
-    Table htable = util.createTable(TABLE, FAMILIES, conf);
-
-    util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
-    Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
-        30000) * 6);
-    Connection c = ConnectionFactory.createConnection(util.getConfiguration());
-    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(zkw, c,
-        TableName.valueOf(TABLE));
-    HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
-    // Ensure that the primary server for test table is not the same one as the primary
-    // of the meta region since we will be killing the srv holding the meta's primary...
-    // We want to be able to write to the test table even when the meta is not present ..
-    // If the servers are the same, then move the test table's region out of the server
-    // to another random server
-    if (hrl.getServerName().equals(primary)) {
-      util.getHBaseAdmin().move(hrl.getRegionInfo().getEncodedNameAsBytes(), null);
-      // wait for the move to complete
-      do {
-        Thread.sleep(10);
-        hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
-      } while (primary.equals(hrl.getServerName()));
-      util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
-      Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
-          30000) * 3);
-    }
-    ServerName master = util.getHBaseClusterInterface().getClusterStatus().getMaster();
-    // kill the master so that regionserver recovery is not triggered at all
-    // for the meta server
-    util.getHBaseClusterInterface().stopMaster(master);
-    util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
-    if (!master.equals(primary)) {
-      util.getHBaseClusterInterface().killRegionServer(primary);
-      util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
+    ServerName master = null;
+    try (Connection c = ConnectionFactory.createConnection(util.getConfiguration());) {
+      try (Table htable = util.createTable(TABLE, FAMILIES, conf);) {
+        util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
+        Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
+            30000) * 6);
+        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(zkw, c,
+            TableName.valueOf(TABLE));
+        HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
+        // Ensure that the primary server for test table is not the same one as the primary
+        // of the meta region since we will be killing the srv holding the meta's primary...
+        // We want to be able to write to the test table even when the meta is not present ..
+        // If the servers are the same, then move the test table's region out of the server
+        // to another random server
+        if (hrl.getServerName().equals(primary)) {
+          util.getHBaseAdmin().move(hrl.getRegionInfo().getEncodedNameAsBytes(), null);
+          // wait for the move to complete
+          do {
+            Thread.sleep(10);
+            hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
+          } while (primary.equals(hrl.getServerName()));
+          util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
+          Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
+              30000) * 3);
+        }
+        master = util.getHBaseClusterInterface().getClusterStatus().getMaster();
+        // kill the master so that regionserver recovery is not triggered at all
+        // for the meta server
+        util.getHBaseClusterInterface().stopMaster(master);
+        util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
+        if (!master.equals(primary)) {
+          util.getHBaseClusterInterface().killRegionServer(primary);
+          util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
+        }
+        ((ClusterConnection)c).clearRegionCache();
+      }
+      Get get = null;
+      Result r = null;
+      byte[] row = "test".getBytes();
+      try (Table htable = c.getTable(TableName.valueOf(TABLE));) {
+        Put put = new Put(row);
+        put.add("foo".getBytes(), row, row);
+        BufferedMutator m = c.getBufferedMutator(TableName.valueOf(TABLE));
+        m.mutate(put);
+        m.flush();
+        // Try to do a get of the row that was just put
+        get = new Get(row);
+        r = htable.get(get);
+        assertTrue(Arrays.equals(r.getRow(), row));
+        // now start back the killed servers and disable use of replicas. That would mean
+        // calls go to the primary
+        util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
+        util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
+        util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
+        ((ClusterConnection)c).clearRegionCache();
+      }
+      conf.setBoolean(HConstants.USE_META_REPLICAS, false);
+      try (Table htable = c.getTable(TableName.valueOf(TABLE));) {
+        r = htable.get(get);
+        assertTrue(Arrays.equals(r.getRow(), row));
+      }
     }
-    ((ClusterConnection)c).clearRegionCache();
-    htable.close();
-    htable = c.getTable(TableName.valueOf(TABLE));
-    byte[] row = "test".getBytes();
-    Put put = new Put(row);
-    put.add("foo".getBytes(), row, row);
-    BufferedMutator m = c.getBufferedMutator(TableName.valueOf(TABLE));
-    m.mutate(put);
-    m.flush();
-    // Try to do a get of the row that was just put
-    Get get = new Get(row);
-    Result r = htable.get(get);
-    assertTrue(Arrays.equals(r.getRow(), row));
-    // now start back the killed servers and disable use of replicas. That would mean
-    // calls go to the primary
-    util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
-    util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
-    util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
-    ((ClusterConnection)c).clearRegionCache();
-    htable.close();
-    conf.setBoolean(HConstants.USE_META_REPLICAS, false);
-    htable = c.getTable(TableName.valueOf(TABLE));
-    r = htable.get(get);
-    assertTrue(Arrays.equals(r.getRow(), row));
   }
 
   @Test
@@ -242,13 +246,15 @@ public class TestMetaWithReplicas {
       TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
       TEST_UTIL.getHBaseAdmin().deleteTable(TABLE);
     }
-    Table htable = TEST_UTIL.createTable(TABLE, FAMILIES, TEST_UTIL.getConfiguration());
-    byte[] row = "test".getBytes();
-    HConnectionImplementation c = ((HConnectionImplementation)((HTable)htable).connection);
-    // check that metalookup pool would get created
-    c.relocateRegion(TABLE, row);
-    ExecutorService ex = c.getCurrentMetaLookupPool();
-    assert(ex != null);
+    try (Table htable =
+        TEST_UTIL.createTable(TABLE, FAMILIES, TEST_UTIL.getConfiguration());) {
+      byte[] row = "test".getBytes();
+      HConnectionImplementation c = ((HConnectionImplementation)((HTable)htable).connection);
+      // check that metalookup pool would get created
+      c.relocateRegion(TABLE, row);
+      ExecutorService ex = c.getCurrentMetaLookupPool();
+      assert(ex != null);
+    }
   }
 
   @Test
@@ -408,7 +414,6 @@ public class TestMetaWithReplicas {
   @Test
   public void testHBaseFsckWithExcessMetaReplicas() throws Exception {
-    HBaseFsck hbck = new HBaseFsck(TEST_UTIL.getConfiguration());
     // Create a meta replica (this will be the 4th one) and assign it
     HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
         HRegionInfo.FIRST_META_REGIONINFO, 3);
@@ -418,7 +423,7 @@
     TEST_UTIL.getMiniHBaseCluster().getMaster().assignRegion(h);
     HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getHBaseAdmin(), h);
     // check that problem exists
-    hbck = doFsck(TEST_UTIL.getConfiguration(), false);
+    HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
     assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.SHOULD_NOT_BE_DEPLOYED});
     // fix the problem
     hbck = doFsck(TEST_UTIL.getConfiguration(), true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/dbf9bc7b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 79e635b..2faac62 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -40,19 +40,18 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -65,10 +64,11 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.Import.KeyValueImporter;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper.Context;
 import org.apache.hadoop.util.GenericOptionsParser;
@@ -85,7 +85,7 @@ import org.mockito.stubbing.Answer;
 /**
  * Tests the table import and table export MR job functionality
  */
-@Category(MediumTests.class)
+@Category(LargeTests.class)
 public class TestImportExport {
   private static final Log LOG = LogFactory.getLog(TestImportExport.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@@ -133,9 +133,11 @@
    * @throws InterruptedException
    * @throws ClassNotFoundException
    */
-  boolean runExport(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
+  boolean runExport(String[] args)
+      throws IOException, InterruptedException, ClassNotFoundException {
     // need to make a copy of the configuration to make sure different temp dirs are used.
-    GenericOptionsParser opts = new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
+    GenericOptionsParser opts =
+        new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
     Configuration conf = opts.getConfiguration();
     args = opts.getRemainingArgs();
     Job job = Export.createSubmittableJob(conf, args);
@@ -151,9 +153,11 @@
    * @throws InterruptedException
    * @throws ClassNotFoundException
    */
-  boolean runImport(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
+  boolean runImport(String[] args)
+      throws IOException, InterruptedException, ClassNotFoundException {
     // need to make a copy of the configuration to make sure different temp dirs are used.
-    GenericOptionsParser opts = new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
+    GenericOptionsParser opts =
+        new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
     Configuration conf = opts.getConfiguration();
     args = opts.getRemainingArgs();
     Job job = Import.createSubmittableJob(conf, args);
@@ -168,17 +172,18 @@
   @Test
   public void testSimpleCase() throws Exception {
     String EXPORT_TABLE = "exportSimpleCase";
-    Table t = UTIL.createTable(TableName.valueOf(EXPORT_TABLE), FAMILYA, 3);
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    t.put(p);
-    p = new Put(ROW2);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    t.put(p);
+    try (Table t = UTIL.createTable(TableName.valueOf(EXPORT_TABLE), FAMILYA, 3);) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      t.put(p);
+      p = new Put(ROW2);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      t.put(p);
+    }
 
     String[] args = new String[] {
         EXPORT_TABLE,
@@ -188,22 +193,23 @@
     assertTrue(runExport(args));
 
     String IMPORT_TABLE = "importTableSimpleCase";
-    t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);
-    args = new String[] {
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) {
+      args = new String[] {
         "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
         IMPORT_TABLE,
         FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    Get g = new Get(ROW1);
-    g.setMaxVersions();
-    Result r = t.get(g);
-    assertEquals(3, r.size());
-    g = new Get(ROW2);
-    g.setMaxVersions();
-    r = t.get(g);
-    assertEquals(3, r.size());
+      };
+      assertTrue(runImport(args));
+
+      Get g = new Get(ROW1);
+      g.setMaxVersions();
+      Result r = t.get(g);
+      assertEquals(3, r.size());
+      g = new Get(ROW2);
+      g.setMaxVersions();
+      r = t.get(g);
+      assertEquals(3, r.size());
+    }
   }
 
   /**
@@ -237,23 +243,22 @@
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name));
     String IMPORT_TABLE = name;
-    Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);
-    String[] args = new String[] {
-        "-Dhbase.import.version=0.94" ,
-        IMPORT_TABLE, FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    /* exportedTableIn94Format contains 5 rows
-     ROW   COLUMN+CELL
-     r1    column=f1:c1, timestamp=1383766761171, value=val1
-     r2    column=f1:c1, timestamp=1383766771642, value=val2
-     r3    column=f1:c1, timestamp=1383766777615, value=val3
-     r4    column=f1:c1, timestamp=1383766785146, value=val4
-     r5    column=f1:c1, timestamp=1383766791506, value=val5
-    */
-    assertEquals(5, UTIL.countRows(t));
-    t.close();
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) {
+      String[] args = new String[] {
+          "-Dhbase.import.version=0.94" ,
+          IMPORT_TABLE, FQ_OUTPUT_DIR
+      };
+      assertTrue(runImport(args));
+      /* exportedTableIn94Format contains 5 rows
+       ROW   COLUMN+CELL
+       r1    column=f1:c1, timestamp=1383766761171, value=val1
+       r2    column=f1:c1, timestamp=1383766771642, value=val2
+       r3    column=f1:c1, timestamp=1383766777615, value=val3
+       r4    column=f1:c1, timestamp=1383766785146, value=val4
+       r5    column=f1:c1, timestamp=1383766791506, value=val5
+      */
+      assertEquals(5, UTIL.countRows(t));
+    }
   }
 
   /**
@@ -262,35 +267,30 @@
   @Test
   public void testExportScannerBatching() throws Exception {
     String BATCH_TABLE = "exportWithBatch";
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(BATCH_TABLE));
-    desc.addFamily(new HColumnDescriptor(FAMILYA)
-        .setMaxVersions(1)
-    );
-    UTIL.getHBaseAdmin().createTable(desc);
-    Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    p.add(FAMILYA, QUAL, now+3, QUAL);
-    p.add(FAMILYA, QUAL, now+4, QUAL);
-    t.put(p);
-
-    String[] args = new String[] {
+    try (Table t = UTIL.createTable(TableName.valueOf(BATCH_TABLE), FAMILYA, 1);) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      p.add(FAMILYA, QUAL, now+3, QUAL);
+      p.add(FAMILYA, QUAL, now+4, QUAL);
+      t.put(p);
+
+      String[] args = new String[] {
         "-D" + Export.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE,  // added scanner batching arg.
         BATCH_TABLE,
         FQ_OUTPUT_DIR
-    };
-    assertTrue(runExport(args));
+      };
+      assertTrue(runExport(args));
 
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    fs.delete(new Path(FQ_OUTPUT_DIR), true);
-    t.close();
+      FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+      fs.delete(new Path(FQ_OUTPUT_DIR), true);
+    }
   }
 
   @Test
   public void testWithDeletes() throws Exception {
+    String IMPORT_TABLE = "importWithDeletes";
     String EXPORT_TABLE = "exportWithDeletes";
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
@@ -298,62 +298,59 @@
         .setKeepDeletedCells(true)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-    Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    p.add(FAMILYA, QUAL, now+3, QUAL);
-    p.add(FAMILYA, QUAL, now+4, QUAL);
-    t.put(p);
-
-    Delete d = new Delete(ROW1, now+3);
-    t.delete(d);
-    d = new Delete(ROW1);
-    d.deleteColumns(FAMILYA, QUAL, now+2);
-    t.delete(d);
-
-    String[] args = new String[] {
+    try (Table t = UTIL.getConnection().getTable(TableName.valueOf(EXPORT_TABLE));) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      p.add(FAMILYA, QUAL, now+3, QUAL);
+      p.add(FAMILYA, QUAL, now+4, QUAL);
+      t.put(p);
+
+      Delete d = new Delete(ROW1, now+3);
+      t.delete(d);
+      d = new Delete(ROW1);
+      d.deleteColumns(FAMILYA, QUAL, now+2);
+      t.delete(d);
+
+      String[] args = new String[] {
         "-D" + Export.RAW_SCAN + "=true",
         EXPORT_TABLE,
         FQ_OUTPUT_DIR,
         "1000", // max number of key versions per key to export
-    };
-    assertTrue(runExport(args));
+      };
+      assertTrue(runExport(args));
 
-    String IMPORT_TABLE = "importWithDeletes";
-    desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
-    desc.addFamily(new HColumnDescriptor(FAMILYA)
+      desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
+      desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
         .setKeepDeletedCells(true)
-    );
+      );
+    }
     UTIL.getHBaseAdmin().createTable(desc);
-    t.close();
-    t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-    args = new String[] {
+    try (Table t = UTIL.getConnection().getTable(desc.getTableName());) {
+      String [] args = new String[] {
         IMPORT_TABLE, FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    Scan s = new Scan();
-    s.setMaxVersions();
-    s.setRaw(true);
-    ResultScanner scanner = t.getScanner(s);
-    Result r = scanner.next();
-    Cell[] res = r.rawCells();
-    assertTrue(CellUtil.isDeleteFamily(res[0]));
-    assertEquals(now+4, res[1].getTimestamp());
-    assertEquals(now+3, res[2].getTimestamp());
-    assertTrue(CellUtil.isDelete(res[3]));
-    assertEquals(now+2, res[4].getTimestamp());
-    assertEquals(now+1, res[5].getTimestamp());
-    assertEquals(now, res[6].getTimestamp());
-    t.close();
+      };
+      assertTrue(runImport(args));
+
+      Scan s = new Scan();
+      s.setMaxVersions();
+      s.setRaw(true);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      Cell[] res = r.rawCells();
+      assertTrue(CellUtil.isDeleteFamily(res[0]));
+      assertEquals(now+4, res[1].getTimestamp());
+      assertEquals(now+3, res[2].getTimestamp());
+      assertTrue(CellUtil.isDelete(res[3]));
+      assertEquals(now+2, res[4].getTimestamp());
+      assertEquals(now+1, res[5].getTimestamp());
+      assertEquals(now, res[6].getTimestamp());
+    }
   }
-
-
+
   @Test
   public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Exception {
     String EXPORT_TABLE = "exportWithMultipleDeleteFamilyMarkersOfSameRowSameFamily";
@@ -363,16 +360,15 @@
         .setKeepDeletedCells(true)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-    HTable exportT = new HTable(UTIL.getConfiguration(), EXPORT_TABLE);
+    Table exportT = UTIL.getConnection().getTable(desc.getTableName());
+    //Add first version of QUAL
+    Put p = new Put(ROW1);
+    p.add(FAMILYA, QUAL, now, QUAL);
+    exportT.put(p);
 
-    //Add first version of QUAL
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    exportT.put(p);
-
-    //Add Delete family marker
-    Delete d = new Delete(ROW1, now+3);
-    exportT.delete(d);
+    //Add Delete family marker
+    Delete d = new Delete(ROW1, now+3);
+    exportT.delete(d);
 
     //Add second version of QUAL
     p = new Put(ROW1);
@@ -382,8 +378,8 @@
     //Add second Delete family marker
     d = new Delete(ROW1, now+7);
     exportT.delete(d);
-
-
+
+
     String[] args = new String[] {
         "-D" + Export.RAW_SCAN + "=true",
         EXPORT_TABLE,
@@ -399,8 +395,8 @@
         .setKeepDeletedCells(true)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-
-    HTable importT = new HTable(UTIL.getConfiguration(), IMPORT_TABLE);
+
+    Table importT = UTIL.getConnection().getTable(desc.getTableName());
     args = new String[] {
         IMPORT_TABLE,
         FQ_OUTPUT_DIR
@@ -410,21 +406,17 @@
     Scan s = new Scan();
     s.setMaxVersions();
     s.setRaw(true);
-
+
     ResultScanner importedTScanner = importT.getScanner(s);
     Result importedTResult = importedTScanner.next();
-
+
     ResultScanner exportedTScanner = exportT.getScanner(s);
     Result exportedTResult = exportedTScanner.next();
-    try
-    {
+    try {
       Result.compareResults(exportedTResult, importedTResult);
-    }
-    catch (Exception e) {
+    } catch (Exception e) {
       fail("Original and imported tables data comparison failed with error:"+e.getMessage());
-    }
-    finally
-    {
+    } finally {
       exportT.close();
       importT.close();
    }
@@ -441,7 +433,7 @@
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5));
     UTIL.getHBaseAdmin().createTable(desc);
-    Table exportTable = new HTable(UTIL.getConfiguration(), desc.getTableName());
+    Table exportTable = UTIL.getConnection().getTable(desc.getTableName());
 
     Put p1 = new Put(ROW1);
     p1.add(FAMILYA, QUAL, now, QUAL);
@@ -466,9 +458,10 @@
     desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5));
     UTIL.getHBaseAdmin().createTable(desc);
-    Table importTable = new HTable(UTIL.getConfiguration(), desc.getTableName());
+    Table importTable = UTIL.getConnection().getTable(desc.getTableName());
     args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(),
-        "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, FQ_OUTPUT_DIR,
+        "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE,
+        FQ_OUTPUT_DIR,
         "1000" };
     assertTrue(runImport(args));
@@ -511,7 +504,7 @@
     results.close();
     return count;
   }
-
+
   /**
    * test main method. Import should print help and call System.exit
   */
@@ -623,7 +616,7 @@
     args.add("param2");
 
     Import.addFilterAndArguments(configuration, FilterBase.class, args);
-    assertEquals("org.apache.hadoop.hbase.filter.FilterBase",
+    assertEquals("org.apache.hadoop.hbase.filter.FilterBase",
         configuration.get(Import.FILTER_CLASS_CONF_KEY));
     assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY));
   }
@@ -632,20 +625,20 @@
   public void testDurability() throws IOException, InterruptedException, ClassNotFoundException {
     // Create an export table.
     String exportTableName = "exporttestDurability";
-    Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3);
-
-    // Insert some data
-    Put put = new Put(ROW1);
-    put.add(FAMILYA, QUAL, now, QUAL);
-    put.add(FAMILYA, QUAL, now + 1, QUAL);
-    put.add(FAMILYA, QUAL, now + 2, QUAL);
-    exportTable.put(put);
-
-    put = new Put(ROW2);
-    put.add(FAMILYA, QUAL, now, QUAL);
-    put.add(FAMILYA, QUAL, now + 1, QUAL);
-    put.add(FAMILYA, QUAL, now + 2, QUAL);
-    exportTable.put(put);
+    try (Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3);) {
+      // Insert some data
+      Put put = new Put(ROW1);
+      put.add(FAMILYA, QUAL, now, QUAL);
+      put.add(FAMILYA, QUAL, now + 1, QUAL);
+      put.add(FAMILYA, QUAL, now + 2, QUAL);
+      exportTable.put(put);
+
+      put = new Put(ROW2);
+      put.add(FAMILYA, QUAL, now, QUAL);
+      put.add(FAMILYA, QUAL, now + 1, QUAL);
+      put.add(FAMILYA, QUAL, now + 2, QUAL);
+      exportTable.put(put);
+    }
 
     // Run the export
     String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"};
@@ -653,35 +646,46 @@
 
     // Create the table for import
     String importTableName = "importTestDurability1";
-    Table importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
-
-    // Register the wal listener for the import table
-    TableWALActionListener walListener = new TableWALActionListener(importTableName);
-    WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(null);
-    wal.registerWALActionsListener(walListener);
-
-    // Run the import with SKIP_WAL
-    args =
-        new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(),
+    WAL wal = null;
+    HRegionInfo region = null;
+    TableWALActionListener walListener = null;
+    try (Table importTable =
+        UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);) {
+
+      // Register the wal listener for the import table
+      walListener = new TableWALActionListener(importTableName);
+      region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
+          .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
+      wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
+      wal.registerWALActionsListener(walListener);
+
+      // Run the import with SKIP_WAL
+      args =
+          new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(),
             importTableName, FQ_OUTPUT_DIR };
-    assertTrue(runImport(args));
-    //Assert that the wal is not visited
-    assertTrue(!walListener.isWALVisited());
-    //Ensure that the count is 2 (only one version of key value is obtained)
-    assertTrue(getCount(importTable, null) == 2);
+      assertTrue(runImport(args));
+      //Assert that the wal is not visited
+      assertTrue(!walListener.isWALVisited());
+      //Ensure that the count is 2 (only one version of key value is obtained)
+      assertTrue(getCount(importTable, null) == 2);
 
-    // Run the import with the default durability option
+      // Run the import with the default durability option
+    }
     importTableName = "importTestDurability2";
-    importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
-    wal.unregisterWALActionsListener(walListener);
-    walListener = new TableWALActionListener(importTableName);
-    wal.registerWALActionsListener(walListener);
-    args = new String[] { importTableName, FQ_OUTPUT_DIR };
-    assertTrue(runImport(args));
-    //Assert that the wal is visited
-    assertTrue(walListener.isWALVisited());
-    //Ensure that the count is 2 (only one version of key value is obtained)
-    assertTrue(getCount(importTable, null) == 2);
+    try (Table importTable =
+        UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);) {
+      region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
+          .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
+      wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
+      walListener = new TableWALActionListener(importTableName);
+      wal.registerWALActionsListener(walListener);
+      args = new String[] { importTableName, FQ_OUTPUT_DIR };
+      assertTrue(runImport(args));
+      //Assert that the wal is visited
+      assertTrue(walListener.isWALVisited());
+      //Ensure that the count is 2 (only one version of key value is obtained)
+      assertTrue(getCount(importTable, null) == 2);
+    }
   }
 
   /**
@@ -707,5 +711,5 @@
     public boolean isWALVisited() {
       return isVisited;
     }
-  }
-}
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/dbf9bc7b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index bc4d96e..6cef518 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
@@ -60,6 +61,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -92,21 +95,25 @@ public class TestAtomicOperation {
   static final byte [] row = Bytes.toBytes("rowA");
   static final byte [] row2 = Bytes.toBytes("rowB");
 
-  @Before
+  @Before
   public void setup() {
     tableName = Bytes.toBytes(name.getMethodName());
   }
-
+
   @After
   public void teardown() throws IOException {
     if (region != null) {
+      BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
       ((HRegion)region).close();
+      WAL wal = ((HRegion)region).getWAL();
+      if (wal != null) wal.close();
+      if (bc != null) bc.shutdown();
       region = null;
     }
   }
 
   //////////////////////////////////////////////////////////////////////////////
   // New tests that doesn't spin up a mini cluster but rather just test the
-  // individual code pieces in the HRegion. 
+  // individual code pieces in the HRegion.
   //////////////////////////////////////////////////////////////////////////////
 
   /**
@@ -138,17 +145,15 @@
    */
   @Test
   public void testIncrementMultiThreads() throws IOException {
-    LOG.info("Starting test testIncrementMultiThreads");
     // run a with mixed column families (1 and 3 versions)
    initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
 
-    // create 100 threads, each will increment by its own quantity
-    int numThreads = 100;
+    // create 25 threads, each will increment by its own quantity
+    int numThreads = 25;
     int incrementsPerThread = 1000;
     Incrementer[] all = new Incrementer[numThreads];
     int expectedTotal = 0;
-
     // create all threads
     for (int i = 0; i < numThreads; i++) {
       all[i] = new Incrementer(region, i, i, incrementsPerThread);
@@ -165,13 +170,13 @@
       try {
         all[i].join();
       } catch (InterruptedException e) {
+        LOG.info("Ignored", e);
       }
     }
     assertICV(row, fam1, qual1, expectedTotal);
     assertICV(row, fam1, qual2, expectedTotal*2);
     assertICV(row, fam2, qual3, expectedTotal*3);
-    LOG.info("testIncrementMultiThreads successfully verified that total is " +
-        expectedTotal);
+    LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
   }
 
 
@@ -222,6 +227,7 @@
 
     public Incrementer(Region region, int threadNumber, int amount, int numIncrements) {
+      super("incrementer." + threadNumber);
       this.region = region;
       this.numIncrements = numIncrements;
       this.amount = amount;
@@ -230,7 +236,7 @@
 
     @Override
     public void run() {
-      for (int i=0; i