hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-14794 Cleanup TestAtomicOperation
Date Wed, 11 Nov 2015 04:57:43 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 37815cac9 -> 7280ec09d


HBASE-14794 Cleanup TestAtomicOperation

    hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
      Fix a few missing table closes (this suite seems to leave loads of threads
      behind when the test is done, but I have not figured out how yet).

    hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
      Fix some missing table closes. We were leaving client
      resources open.

    hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
      Close up WALs when done.
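
      For context, the pattern applied throughout is the Java 7
      try-with-resources idiom: Table and Connection implement Closeable,
      so declaring them in the try header guarantees the close runs even
      when an assertion or IOException fires mid-test. A minimal standalone
      sketch of the pattern (table, family, and qualifier names are
      placeholders, not names from these tests):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.ConnectionFactory;
        import org.apache.hadoop.hbase.client.Put;
        import org.apache.hadoop.hbase.client.Table;
        import org.apache.hadoop.hbase.util.Bytes;

        public class TableCloseSketch {
          public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Both resources close automatically, in reverse order of
            // declaration, whether or not the body throws.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("sketchTable"))) {
              Put p = new Put(Bytes.toBytes("row1"));
              p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
              table.put(p);
            }
          }
        }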


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7280ec09
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7280ec09
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7280ec09

Branch: refs/heads/branch-1
Commit: 7280ec09df4cfe108943ce9e550edb1455244305
Parents: 37815ca
Author: stack <stack@apache.org>
Authored: Tue Nov 10 18:57:04 2015 -1000
Committer: stack <stack@apache.org>
Committed: Tue Nov 10 18:57:35 2015 -1000

----------------------------------------------------------------------
 .../hbase/client/TestMetaWithReplicas.java      | 137 ++++----
 .../hbase/mapreduce/TestImportExport.java       | 339 ++++++++++---------
 .../hbase/regionserver/TestAtomicOperation.java |  58 ++--
 3 files changed, 276 insertions(+), 258 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7280ec09/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 493013c..d0f6a97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -174,64 +174,68 @@ public class TestMetaWithReplicas {
       util.getHBaseAdmin().disableTable(TABLE);
       util.getHBaseAdmin().deleteTable(TABLE);
     }
-    Table htable = util.createTable(TABLE, FAMILIES, conf);
-
-    util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
-    Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
-       30000) * 6);
-    Connection c = ConnectionFactory.createConnection(util.getConfiguration());
-    List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(zkw, c,
-        TableName.valueOf(TABLE));
-    HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
-    // Ensure that the primary server for test table is not the same one as the primary
-    // of the meta region since we will be killing the srv holding the meta's primary...
-    // We want to be able to write to the test table even when the meta is not present ..
-    // If the servers are the same, then move the test table's region out of the server
-    // to another random server
-    if (hrl.getServerName().equals(primary)) {
-      util.getHBaseAdmin().move(hrl.getRegionInfo().getEncodedNameAsBytes(), null);
-      // wait for the move to complete
-      do {
-        Thread.sleep(10);
-        hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
-      } while (primary.equals(hrl.getServerName()));
-      util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
-      Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
-         30000) * 3);
-    }
-    ServerName master = util.getHBaseClusterInterface().getClusterStatus().getMaster();
-    // kill the master so that regionserver recovery is not triggered at all
-    // for the meta server
-    util.getHBaseClusterInterface().stopMaster(master);
-    util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
-    if (!master.equals(primary)) {
-      util.getHBaseClusterInterface().killRegionServer(primary);
-      util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
+    ServerName master = null;
+    try (Connection c = ConnectionFactory.createConnection(util.getConfiguration());) {
+      try (Table htable = util.createTable(TABLE, FAMILIES, conf);) {
+        util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
+        Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
+           30000) * 6);
+        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(zkw, c,
+          TableName.valueOf(TABLE));
+        HRegionLocation hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
+        // Ensure that the primary server for test table is not the same one as the primary
+        // of the meta region since we will be killing the srv holding the meta's primary...
+        // We want to be able to write to the test table even when the meta is not present ..
+        // If the servers are the same, then move the test table's region out of the server
+        // to another random server
+        if (hrl.getServerName().equals(primary)) {
+          util.getHBaseAdmin().move(hrl.getRegionInfo().getEncodedNameAsBytes(), null);
+          // wait for the move to complete
+          do {
+            Thread.sleep(10);
+            hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
+          } while (primary.equals(hrl.getServerName()));
+          util.getHBaseAdmin().flush(TableName.META_TABLE_NAME);
+          Thread.sleep(conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
+             30000) * 3);
+        }
+        master = util.getHBaseClusterInterface().getClusterStatus().getMaster();
+        // kill the master so that regionserver recovery is not triggered at all
+        // for the meta server
+        util.getHBaseClusterInterface().stopMaster(master);
+        util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
+        if (!master.equals(primary)) {
+          util.getHBaseClusterInterface().killRegionServer(primary);
+          util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
+        }
+        ((ClusterConnection)c).clearRegionCache();
+      }
+      Get get = null;
+      Result r = null;
+      byte[] row = "test".getBytes();
+      try (Table htable = c.getTable(TableName.valueOf(TABLE));) {
+        Put put = new Put(row);
+        put.add("foo".getBytes(), row, row);
+        BufferedMutator m = c.getBufferedMutator(TableName.valueOf(TABLE));
+        m.mutate(put);
+        m.flush();
+        // Try to do a get of the row that was just put
+        get = new Get(row);
+        r = htable.get(get);
+        assertTrue(Arrays.equals(r.getRow(), row));
+        // now start back the killed servers and disable use of replicas. That would mean
+        // calls go to the primary
+        util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
+        util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
+        util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
+        ((ClusterConnection)c).clearRegionCache();
+      }
+      conf.setBoolean(HConstants.USE_META_REPLICAS, false);
+      try (Table htable = c.getTable(TableName.valueOf(TABLE));) {
+        r = htable.get(get);
+        assertTrue(Arrays.equals(r.getRow(), row));
+      }
     }
-    ((ClusterConnection)c).clearRegionCache();
-    htable.close();
-    htable = c.getTable(TableName.valueOf(TABLE));
-    byte[] row = "test".getBytes();
-    Put put = new Put(row);
-    put.add("foo".getBytes(), row, row);
-    BufferedMutator m = c.getBufferedMutator(TableName.valueOf(TABLE));
-    m.mutate(put);
-    m.flush();
-    // Try to do a get of the row that was just put
-    Get get = new Get(row);
-    Result r = htable.get(get);
-    assertTrue(Arrays.equals(r.getRow(), row));
-    // now start back the killed servers and disable use of replicas. That would mean
-    // calls go to the primary
-    util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
-    util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
-    util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
-    ((ClusterConnection)c).clearRegionCache();
-    htable.close();
-    conf.setBoolean(HConstants.USE_META_REPLICAS, false);
-    htable = c.getTable(TableName.valueOf(TABLE));
-    r = htable.get(get);
-    assertTrue(Arrays.equals(r.getRow(), row));
   }
 
   @Test
@@ -242,13 +246,15 @@ public class TestMetaWithReplicas {
       TEST_UTIL.getHBaseAdmin().disableTable(TABLE);
       TEST_UTIL.getHBaseAdmin().deleteTable(TABLE);
     }
-    Table htable = TEST_UTIL.createTable(TABLE, FAMILIES, TEST_UTIL.getConfiguration());
-    byte[] row = "test".getBytes();
-    HConnectionImplementation c = ((HConnectionImplementation)((HTable)htable).connection);
-    // check that metalookup pool would get created
-    c.relocateRegion(TABLE, row);
-    ExecutorService ex = c.getCurrentMetaLookupPool();
-    assert(ex != null);
+    try (Table htable =
+        TEST_UTIL.createTable(TABLE, FAMILIES, TEST_UTIL.getConfiguration());) {
+      byte[] row = "test".getBytes();
+      HConnectionImplementation c = ((HConnectionImplementation)((HTable)htable).connection);
+      // check that metalookup pool would get created
+      c.relocateRegion(TABLE, row);
+      ExecutorService ex = c.getCurrentMetaLookupPool();
+      assert(ex != null);
+    }
   }
 
   @Test
@@ -408,7 +414,6 @@ public class TestMetaWithReplicas {
 
   @Test
   public void testHBaseFsckWithExcessMetaReplicas() throws Exception {
-    HBaseFsck hbck = new HBaseFsck(TEST_UTIL.getConfiguration());
     // Create a meta replica (this will be the 4th one) and assign it
     HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
         HRegionInfo.FIRST_META_REGIONINFO, 3);
@@ -418,7 +423,7 @@ public class TestMetaWithReplicas {
     TEST_UTIL.getMiniHBaseCluster().getMaster().assignRegion(h);
     HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getHBaseAdmin(), h);
     // check that problem exists
-    hbck = doFsck(TEST_UTIL.getConfiguration(), false);
+    HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
     assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.SHOULD_NOT_BE_DEPLOYED});
     // fix the problem
     hbck = doFsck(TEST_UTIL.getConfiguration(), true);
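
A note on the block above: the rewritten test obtains a BufferedMutator from
the Connection and flushes it, relying on Connection.close() to reclaim it.
BufferedMutator is itself Closeable, so when one is used standalone it fits
the same try-with-resources idiom. A minimal sketch, assuming a Configuration
named conf (table, family, and qualifier names are placeholders):

  try (Connection c = ConnectionFactory.createConnection(conf);
       BufferedMutator m = c.getBufferedMutator(TableName.valueOf("sketchTable"))) {
    Put put = new Put(Bytes.toBytes("test"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    m.mutate(put);
    m.flush(); // push buffered edits so a subsequent Get can see them
  } // close() performs a final flush and releases the write buffer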

http://git-wip-us.apache.org/repos/asf/hbase/blob/7280ec09/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 2269fe6..2e06fab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -169,17 +169,18 @@ public class TestImportExport {
   @Test
   public void testSimpleCase() throws Exception {
     String EXPORT_TABLE = "exportSimpleCase";
-    Table t = UTIL.createTable(TableName.valueOf(EXPORT_TABLE), FAMILYA, 3);
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    t.put(p);
-    p = new Put(ROW2);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    t.put(p);
+    try (Table t = UTIL.createTable(TableName.valueOf(EXPORT_TABLE), FAMILYA, 3);) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      t.put(p);
+      p = new Put(ROW2);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      t.put(p);
+    }
 
     String[] args = new String[] {
         EXPORT_TABLE,
@@ -189,22 +190,23 @@ public class TestImportExport {
     assertTrue(runExport(args));
 
     String IMPORT_TABLE = "importTableSimpleCase";
-    t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);
-    args = new String[] {
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) {
+      args = new String[] {
         "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
         IMPORT_TABLE,
         FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    Get g = new Get(ROW1);
-    g.setMaxVersions();
-    Result r = t.get(g);
-    assertEquals(3, r.size());
-    g = new Get(ROW2);
-    g.setMaxVersions();
-    r = t.get(g);
-    assertEquals(3, r.size());
+      };
+      assertTrue(runImport(args));
+
+      Get g = new Get(ROW1);
+      g.setMaxVersions();
+      Result r = t.get(g);
+      assertEquals(3, r.size());
+      g = new Get(ROW2);
+      g.setMaxVersions();
+      r = t.get(g);
+      assertEquals(3, r.size());
+    }
   }
 
   /**
@@ -238,23 +240,22 @@ public class TestImportExport {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name));
     String IMPORT_TABLE = name;
-    Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);
-    String[] args = new String[] {
-        "-Dhbase.import.version=0.94" ,
-        IMPORT_TABLE, FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    /* exportedTableIn94Format contains 5 rows
-     ROW         COLUMN+CELL
-     r1          column=f1:c1, timestamp=1383766761171, value=val1
-     r2          column=f1:c1, timestamp=1383766771642, value=val2
-     r3          column=f1:c1, timestamp=1383766777615, value=val3
-     r4          column=f1:c1, timestamp=1383766785146, value=val4
-     r5          column=f1:c1, timestamp=1383766791506, value=val5
-     */
-    assertEquals(5, UTIL.countRows(t));
-    t.close();
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) {
+      String[] args = new String[] {
+          "-Dhbase.import.version=0.94" ,
+          IMPORT_TABLE, FQ_OUTPUT_DIR
+      };
+      assertTrue(runImport(args));
+      /* exportedTableIn94Format contains 5 rows
+      ROW         COLUMN+CELL
+      r1          column=f1:c1, timestamp=1383766761171, value=val1
+      r2          column=f1:c1, timestamp=1383766771642, value=val2
+      r3          column=f1:c1, timestamp=1383766777615, value=val3
+      r4          column=f1:c1, timestamp=1383766785146, value=val4
+      r5          column=f1:c1, timestamp=1383766791506, value=val5
+      */
+     assertEquals(5, UTIL.countRows(t));
+    }
   }
 
   /**
@@ -268,30 +269,30 @@ public class TestImportExport {
         .setMaxVersions(1)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-    Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    p.add(FAMILYA, QUAL, now+3, QUAL);
-    p.add(FAMILYA, QUAL, now+4, QUAL);
-    t.put(p);
-
-    String[] args = new String[] {
+    try (Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      p.add(FAMILYA, QUAL, now+3, QUAL);
+      p.add(FAMILYA, QUAL, now+4, QUAL);
+      t.put(p);
+
+      String[] args = new String[] {
        "-D" + Export.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE,  // added scanner batching arg.
         BATCH_TABLE,
         FQ_OUTPUT_DIR
-    };
-    assertTrue(runExport(args));
+      };
+      assertTrue(runExport(args));
 
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    fs.delete(new Path(FQ_OUTPUT_DIR), true);
-    t.close();
+      FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+      fs.delete(new Path(FQ_OUTPUT_DIR), true);
+    }
   }
 
   @Test
   public void testWithDeletes() throws Exception {
+    String IMPORT_TABLE = "importWithDeletes";
     String EXPORT_TABLE = "exportWithDeletes";
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
@@ -299,62 +300,59 @@ public class TestImportExport {
         .setKeepDeletedCells(true)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-    Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    p.add(FAMILYA, QUAL, now+1, QUAL);
-    p.add(FAMILYA, QUAL, now+2, QUAL);
-    p.add(FAMILYA, QUAL, now+3, QUAL);
-    p.add(FAMILYA, QUAL, now+4, QUAL);
-    t.put(p);
-
-    Delete d = new Delete(ROW1, now+3);
-    t.delete(d);
-    d = new Delete(ROW1);
-    d.deleteColumns(FAMILYA, QUAL, now+2);
-    t.delete(d);
-
-    String[] args = new String[] {
+    try (Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());) {
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      p.add(FAMILYA, QUAL, now+1, QUAL);
+      p.add(FAMILYA, QUAL, now+2, QUAL);
+      p.add(FAMILYA, QUAL, now+3, QUAL);
+      p.add(FAMILYA, QUAL, now+4, QUAL);
+      t.put(p);
+
+      Delete d = new Delete(ROW1, now+3);
+      t.delete(d);
+      d = new Delete(ROW1);
+      d.deleteColumns(FAMILYA, QUAL, now+2);
+      t.delete(d);
+
+      String[] args = new String[] {
         "-D" + Export.RAW_SCAN + "=true",
         EXPORT_TABLE,
         FQ_OUTPUT_DIR,
         "1000", // max number of key versions per key to export
-    };
-    assertTrue(runExport(args));
+      };
+      assertTrue(runExport(args));
 
-    String IMPORT_TABLE = "importWithDeletes";
-    desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
-    desc.addFamily(new HColumnDescriptor(FAMILYA)
+      desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
+      desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
         .setKeepDeletedCells(true)
-    );
+      );
+    }
     UTIL.getHBaseAdmin().createTable(desc);
-    t.close();
-    t = new HTable(UTIL.getConfiguration(), desc.getTableName());
-    args = new String[] {
+    try (Table t = new HTable(UTIL.getConfiguration(), desc.getTableName());) {
+       String [] args = new String[] {
         IMPORT_TABLE,
         FQ_OUTPUT_DIR
-    };
-    assertTrue(runImport(args));
-
-    Scan s = new Scan();
-    s.setMaxVersions();
-    s.setRaw(true);
-    ResultScanner scanner = t.getScanner(s);
-    Result r = scanner.next();
-    Cell[] res = r.rawCells();
-    assertTrue(CellUtil.isDeleteFamily(res[0]));
-    assertEquals(now+4, res[1].getTimestamp());
-    assertEquals(now+3, res[2].getTimestamp());
-    assertTrue(CellUtil.isDelete(res[3]));
-    assertEquals(now+2, res[4].getTimestamp());
-    assertEquals(now+1, res[5].getTimestamp());
-    assertEquals(now, res[6].getTimestamp());
-    t.close();
+       };
+       assertTrue(runImport(args));
+
+       Scan s = new Scan();
+       s.setMaxVersions();
+       s.setRaw(true);
+       ResultScanner scanner = t.getScanner(s);
+       Result r = scanner.next();
+       Cell[] res = r.rawCells();
+       assertTrue(CellUtil.isDeleteFamily(res[0]));
+       assertEquals(now+4, res[1].getTimestamp());
+       assertEquals(now+3, res[2].getTimestamp());
+       assertTrue(CellUtil.isDelete(res[3]));
+       assertEquals(now+2, res[4].getTimestamp());
+      assertEquals(now+1, res[5].getTimestamp());
+      assertEquals(now, res[6].getTimestamp());
+    }
   }
-  
-  
+
   @Test
   public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Exception {
     String EXPORT_TABLE = "exportWithMultipleDeleteFamilyMarkersOfSameRowSameFamily";
@@ -365,15 +363,14 @@ public class TestImportExport {
     );
     UTIL.getHBaseAdmin().createTable(desc);
     HTable exportT = new HTable(UTIL.getConfiguration(), EXPORT_TABLE);
+      //Add first version of QUAL
+      Put p = new Put(ROW1);
+      p.add(FAMILYA, QUAL, now, QUAL);
+      exportT.put(p);
 
-    //Add first version of QUAL
-    Put p = new Put(ROW1);
-    p.add(FAMILYA, QUAL, now, QUAL);
-    exportT.put(p);
-
-    //Add Delete family marker
-    Delete d = new Delete(ROW1, now+3);
-    exportT.delete(d);
+      //Add Delete family marker
+      Delete d = new Delete(ROW1, now+3);
+      exportT.delete(d);
 
     //Add second version of QUAL
     p = new Put(ROW1);
@@ -383,8 +380,8 @@ public class TestImportExport {
     //Add second Delete family marker
     d = new Delete(ROW1, now+7);
     exportT.delete(d);
-    
-    
+
+
     String[] args = new String[] {
         "-D" + Export.RAW_SCAN + "=true",
         EXPORT_TABLE,
@@ -400,7 +397,7 @@ public class TestImportExport {
         .setKeepDeletedCells(true)
     );
     UTIL.getHBaseAdmin().createTable(desc);
-    
+
     HTable importT = new HTable(UTIL.getConfiguration(), IMPORT_TABLE);
     args = new String[] {
         IMPORT_TABLE,
@@ -411,21 +408,17 @@ public class TestImportExport {
     Scan s = new Scan();
     s.setMaxVersions();
     s.setRaw(true);
-    
+
     ResultScanner importedTScanner = importT.getScanner(s);
     Result importedTResult = importedTScanner.next();
-    
+
     ResultScanner exportedTScanner = exportT.getScanner(s);
     Result  exportedTResult =  exportedTScanner.next();
-    try
-    {
+    try {
       Result.compareResults(exportedTResult, importedTResult);
-    }
-    catch (Exception e) {
+    } catch (Exception e) {
       fail("Original and imported tables data comparision failed with error:"+e.getMessage());
-    }
-    finally
-    {
+    } finally {
       exportT.close();
       importT.close();
     }
@@ -469,7 +462,8 @@ public class TestImportExport {
 
     Table importTable = new HTable(UTIL.getConfiguration(), desc.getTableName());
     args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(),
-        "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, FQ_OUTPUT_DIR,
+        "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE,
+        FQ_OUTPUT_DIR,
         "1000" };
     assertTrue(runImport(args));
 
@@ -512,7 +506,7 @@ public class TestImportExport {
     results.close();
     return count;
   }
-  
+
   /**
    * test main method. Import should print help and call System.exit
    */
@@ -624,7 +618,7 @@ public class TestImportExport {
     args.add("param2");
 
     Import.addFilterAndArguments(configuration, FilterBase.class, args);
-    assertEquals("org.apache.hadoop.hbase.filter.FilterBase", 
+    assertEquals("org.apache.hadoop.hbase.filter.FilterBase",
         configuration.get(Import.FILTER_CLASS_CONF_KEY));
     assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY));
   }
@@ -633,20 +627,20 @@ public class TestImportExport {
   public void testDurability() throws IOException, InterruptedException, ClassNotFoundException {
     // Create an export table.
     String exportTableName = "exporttestDurability";
-    Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3);
-
-    // Insert some data
-    Put put = new Put(ROW1);
-    put.add(FAMILYA, QUAL, now, QUAL);
-    put.add(FAMILYA, QUAL, now + 1, QUAL);
-    put.add(FAMILYA, QUAL, now + 2, QUAL);
-    exportTable.put(put);
-
-    put = new Put(ROW2);
-    put.add(FAMILYA, QUAL, now, QUAL);
-    put.add(FAMILYA, QUAL, now + 1, QUAL);
-    put.add(FAMILYA, QUAL, now + 2, QUAL);
-    exportTable.put(put);
+    try (Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3);) {
+      // Insert some data
+      Put put = new Put(ROW1);
+      put.add(FAMILYA, QUAL, now, QUAL);
+      put.add(FAMILYA, QUAL, now + 1, QUAL);
+      put.add(FAMILYA, QUAL, now + 2, QUAL);
+      exportTable.put(put);
+
+      put = new Put(ROW2);
+      put.add(FAMILYA, QUAL, now, QUAL);
+      put.add(FAMILYA, QUAL, now + 1, QUAL);
+      put.add(FAMILYA, QUAL, now + 2, QUAL);
+      exportTable.put(put);
+    }
 
     // Run the export
     String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"};
@@ -654,39 +648,46 @@ public class TestImportExport {
 
     // Create the table for import
     String importTableName = "importTestDurability1";
-    Table importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
-
-    // Register the wal listener for the import table
-    TableWALActionListener walListener = new TableWALActionListener(importTableName);
-    HRegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
-        .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
-    WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
-    wal.registerWALActionsListener(walListener);
-
-    // Run the import with SKIP_WAL
-    args =
-        new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(),
+    WAL wal = null;
+    HRegionInfo region = null;
+    TableWALActionListener walListener = null;
+    try (Table importTable =
+      UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);) {
+
+      // Register the wal listener for the import table
+      walListener = new TableWALActionListener(importTableName);
+      region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
+          .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
+      wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
+      wal.registerWALActionsListener(walListener);
+
+      // Run the import with SKIP_WAL
+      args =
+          new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(),
             importTableName, FQ_OUTPUT_DIR };
-    assertTrue(runImport(args));
-    //Assert that the wal is not visisted
-    assertTrue(!walListener.isWALVisited());
-    //Ensure that the count is 2 (only one version of key value is obtained)
-    assertTrue(getCount(importTable, null) == 2);
+      assertTrue(runImport(args));
+      //Assert that the wal is not visited
+      assertTrue(!walListener.isWALVisited());
+      //Ensure that the count is 2 (only one version of key value is obtained)
+      assertTrue(getCount(importTable, null) == 2);
 
-    // Run the import with the default durability option
+      // Run the import with the default durability option
+    }
     importTableName = "importTestDurability2";
-    importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
-    region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
+    try (Table importTable =
+        UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);) {
+      region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
         .getOnlineRegions(importTable.getName()).get(0).getRegionInfo();
-    wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
-    walListener = new TableWALActionListener(importTableName);
-    wal.registerWALActionsListener(walListener);
-    args = new String[] { importTableName, FQ_OUTPUT_DIR };
-    assertTrue(runImport(args));
-    //Assert that the wal is visisted
-    assertTrue(walListener.isWALVisited());
-    //Ensure that the count is 2 (only one version of key value is obtained)
-    assertTrue(getCount(importTable, null) == 2);
+      wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
+      walListener = new TableWALActionListener(importTableName);
+      wal.registerWALActionsListener(walListener);
+      args = new String[] { importTableName, FQ_OUTPUT_DIR };
+      assertTrue(runImport(args));
+      //Assert that the wal is visited
+      assertTrue(walListener.isWALVisited());
+      //Ensure that the count is 2 (only one version of key value is obtained)
+      assertTrue(getCount(importTable, null) == 2);
+    }
   }
 
   /**
@@ -712,5 +713,5 @@ public class TestImportExport {
     public boolean isWALVisited() {
       return isVisited;
     }
-  }  
+  }
 }
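
The durability test above watches the WAL through a TableWALActionListener,
whose definition sits at the bottom of TestImportExport and is mostly outside
this hunk. The sketch below is an illustrative reconstruction of the idea
using the branch-1 WALActionsListener.Base hooks, not the committed class:

  // Extend the no-op base listener and flag any append against the
  // table under test. Signatures assume branch-1.
  static class SketchWALListener extends WALActionsListener.Base {
    private final String tableName;
    private volatile boolean visited = false;

    SketchWALListener(String tableName) {
      this.tableName = tableName;
    }

    @Override
    public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey,
        WALEdit logEdit) {
      // Invoked for every WAL append; remember whether our table was written.
      if (logKey.getTablename().getNameAsString().equals(tableName)) {
        visited = true;
      }
    }

    boolean isWALVisited() {
      return visited;
    }
  }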

http://git-wip-us.apache.org/repos/asf/hbase/blob/7280ec09/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 17119f8..a49c62e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
@@ -60,6 +61,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -92,21 +95,25 @@ public class TestAtomicOperation {
   static final byte [] row = Bytes.toBytes("rowA");
   static final byte [] row2 = Bytes.toBytes("rowB");
 
-  @Before 
+  @Before
   public void setup() {
     tableName = Bytes.toBytes(name.getMethodName());
   }
-  
+
   @After
   public void teardown() throws IOException {
     if (region != null) {
+      BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
       ((HRegion)region).close();
+      WAL wal = ((HRegion)region).getWAL();
+      if (wal != null) wal.close();
+      if (bc != null) bc.shutdown();
       region = null;
     }
   }
   //////////////////////////////////////////////////////////////////////////////
   // New tests that doesn't spin up a mini cluster but rather just test the
-  // individual code pieces in the HRegion. 
+  // individual code pieces in the HRegion.
   //////////////////////////////////////////////////////////////////////////////
 
   /**
@@ -175,17 +182,15 @@ public class TestAtomicOperation {
    */
   @Test
   public void testIncrementMultiThreads() throws IOException {
-
     LOG.info("Starting test testIncrementMultiThreads");
     // run a with mixed column families (1 and 3 versions)
     initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
 
-    // create 100 threads, each will increment by its own quantity
-    int numThreads = 100;
+    // create 25 threads, each will increment by its own quantity
+    int numThreads = 25;
     int incrementsPerThread = 1000;
     Incrementer[] all = new Incrementer[numThreads];
     int expectedTotal = 0;
-
     // create all threads
     for (int i = 0; i < numThreads; i++) {
       all[i] = new Incrementer(region, i, i, incrementsPerThread);
@@ -202,13 +207,13 @@ public class TestAtomicOperation {
       try {
         all[i].join();
       } catch (InterruptedException e) {
+        LOG.info("Ignored", e);
       }
     }
     assertICV(row, fam1, qual1, expectedTotal);
     assertICV(row, fam1, qual2, expectedTotal*2);
     assertICV(row, fam2, qual3, expectedTotal*3);
-    LOG.info("testIncrementMultiThreads successfully verified that total is " +
-             expectedTotal);
+    LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
   }
 
 
@@ -259,6 +264,7 @@ public class TestAtomicOperation {
 
     public Incrementer(Region region,
         int threadNumber, int amount, int numIncrements) {
+      super("incrementer." + threadNumber);
       this.region = region;
       this.numIncrements = numIncrements;
       this.amount = amount;
@@ -267,7 +273,7 @@ public class TestAtomicOperation {
 
     @Override
     public void run() {
-      for (int i=0; i<numIncrements; i++) {
+      for (int i = 0; i < numIncrements; i++) {
         try {
           Increment inc = new Increment(row);
           inc.addColumn(fam1, qual1, amount);
@@ -279,8 +285,15 @@ public class TestAtomicOperation {
           // verify: Make sure we only see completed increments
           Get g = new Get(row);
           Result result = region.get(g);
-          assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2)));
-          assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3)));
+          if (result != null) {
+            assertTrue(result.getValue(fam1, qual1) != null);
+            assertTrue(result.getValue(fam1, qual2) != null);
+            assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2,
+              Bytes.toLong(result.getValue(fam1, qual2)));
+            assertTrue(result.getValue(fam2, qual3) != null);
+            assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3,
+              Bytes.toLong(result.getValue(fam2, qual3)));
+          }
         } catch (IOException e) {
           e.printStackTrace();
         }
@@ -316,8 +329,8 @@ public class TestAtomicOperation {
 
               Get g = new Get(row);
               Result result = region.get(g);
-              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
-              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
+              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
+              assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
             } catch (IOException e) {
               e.printStackTrace();
               failures.incrementAndGet();
@@ -358,7 +371,7 @@ public class TestAtomicOperation {
     // create 10 threads, each will alternate between adding and
     // removing a column
     int numThreads = 10;
-    int opsPerThread = 500;
+    int opsPerThread = 250;
     AtomicOperation[] all = new AtomicOperation[numThreads];
 
     AtomicLong timeStamps = new AtomicLong(0);
@@ -450,7 +463,7 @@ public class TestAtomicOperation {
     // create 10 threads, each will alternate between adding and
     // removing a column
     int numThreads = 10;
-    int opsPerThread = 500;
+    int opsPerThread = 250;
     AtomicOperation[] all = new AtomicOperation[numThreads];
 
     AtomicLong timeStamps = new AtomicLong(0);
@@ -549,7 +562,7 @@ public class TestAtomicOperation {
       this.failures = failures;
     }
   }
-  
+
   private static CountDownLatch latch = new CountDownLatch(1);
   private enum TestStep {
     INIT,                  // initial put of 10 to set value of the cell
@@ -561,11 +574,11 @@ public class TestAtomicOperation {
   }
   private static volatile TestStep testStep = TestStep.INIT;
   private final String family = "f1";
-     
+
   /**
    * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
-   * MVCC. 
-   * 
+   * MVCC.
+   *
    * Moved into TestAtomicOperation from its original location, TestHBase7051
    */
   @Test
@@ -581,7 +594,7 @@ public class TestAtomicOperation {
     Put put = new Put(Bytes.toBytes("r1"));
     put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
     puts[0] = put;
-    
+
     region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
     MultithreadedTestUtil.TestContext ctx =
       new MultithreadedTestUtil.TestContext(conf);
@@ -600,7 +613,6 @@ public class TestAtomicOperation {
     for (Cell keyValue : results) {
       assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
     }
-
   }
 
   private class PutThread extends TestThread {
@@ -656,7 +668,7 @@ public class TestAtomicOperation {
       }
       return new WrappedRowLock(super.getRowLock(row, readLock));
     }
-    
+
     public class WrappedRowLock implements RowLock {
 
       private final RowLock rowLock;
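
Restated outside the diff, the teardown ordering TestAtomicOperation now
uses: grab the BlockCache reference while the region's stores are still
open, close the region, then close the WAL and shut the cache so their
background threads exit between tests. A sketch of that @After method,
with the reasoning spelled out in comments (assumes the test's region
field, as in the class above):

  @After
  public void teardown() throws IOException {
    if (region != null) {
      // Fetch the cache first: closing the region releases its stores.
      BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
      ((HRegion) region).close();
      WAL wal = ((HRegion) region).getWAL();
      if (wal != null) wal.close();   // stops the WAL's background threads
      if (bc != null) bc.shutdown();  // stops cache maintenance threads
      region = null;
    }
  }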

