hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bus...@apache.org
Subject [2/3] hbase git commit: HBASE-16912 Cleanup for HBaseTestingUtility.
Date Tue, 29 Nov 2016 07:20:57 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index acd1d72..3590624 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -35,8 +35,6 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index edd7847..163007a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -27,7 +27,6 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -55,18 +54,17 @@ import static junit.framework.TestCase.assertTrue;
 import static org.junit.Assert.assertEquals;
 
 @Category({RegionServerTests.class, MediumTests.class})
-public class TestBlocksRead  {
+public class TestBlocksRead {
   private static final Log LOG = LogFactory.getLog(TestBlocksRead.class);
   @Rule public TestName testName = new TestName();
 
-  static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL,
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static BlockCache blockCache;
+
+  private static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL,
       BloomType.ROW, BloomType.NONE };
 
-  private static BlockCache blockCache;
   Region region = null;
-  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString();
-  private Configuration conf = TEST_UTIL.getConfiguration();
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -80,16 +78,10 @@ public class TestBlocksRead  {
   }
 
   /**
-   * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
-   * @param tableName
-   * @param callingMethod
-   * @param conf
-   * @param family
-   * @throws IOException
+   * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(Region)}
    * @return created and initialized region.
    */
-  private Region initHRegion(byte[] tableName, String callingMethod,
-      Configuration conf, String family) throws IOException {
+  private Region initHRegion(byte[] tableName, String family) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
     HColumnDescriptor familyDesc;
     for (int i = 0; i < BLOOM_TYPE.length; i++) {
@@ -101,9 +93,8 @@ public class TestBlocksRead  {
     }
 
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    Path path = new Path(DIR + callingMethod);
-    Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
-    blockCache = new CacheConfig(conf).getBlockCache();
+    Region r = TEST_UTIL.createLocalHRegion(info, htd);
+    blockCache = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
     return r;
   }
 
@@ -213,7 +204,7 @@ public class TestBlocksRead  {
     byte[] TABLE = Bytes.toBytes("testBlocksRead");
     String FAMILY = "cf1";
     Cell kvs[];
-    this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, FAMILY);
 
     try {
       putData(FAMILY, "row", "col1", 1);
@@ -269,7 +260,7 @@ public class TestBlocksRead  {
     byte[] TABLE = Bytes.toBytes("testLazySeekBlocksRead");
     String FAMILY = "cf1";
     Cell kvs[];
-    this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, FAMILY);
 
     try {
       // File 1
@@ -376,7 +367,7 @@ public class TestBlocksRead  {
     byte [] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled");
     String FAMILY = "cf1";
 
-    this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, FAMILY);
 
     try {
       putData(FAMILY, "row", "col1", 1);
@@ -420,7 +411,7 @@ public class TestBlocksRead  {
     byte[] TABLE = Bytes.toBytes("testLazySeekBlocksReadWithDelete");
     String FAMILY = "cf1";
     Cell kvs[];
-    this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, FAMILY);
     try {
       deleteFamily(FAMILY, "row", 200);
       for (int i = 0; i < 100; i++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 93f6dcb..3b99224 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -32,16 +32,12 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -59,7 +55,6 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
index e9ff8ec..20c2779 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
@@ -262,7 +262,7 @@ public class TestFailedAppendAndSync {
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 7ee3f0b..67a2eeb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -24,9 +24,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -43,7 +40,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -72,17 +68,14 @@ public class TestGetClosestAtOrBefore  {
   private static final byte[] T40 = Bytes.toBytes("040");
 
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static Configuration conf = UTIL.getConfiguration();
 
   @Test
   public void testUsingMetaAndBinary() throws IOException {
-    FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = UTIL.getDataTestDirOnTestFS();
     // Up flush size else we bind up when we use default catalog flush of 16k.
     UTIL.getMetaTableDescriptor().setMemStoreFlushSize(64 * 1024 * 1024);
 
-    Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
-        rootdir, this.conf, UTIL.getMetaTableDescriptor());
+    Region mr = UTIL.createLocalHRegion(HRegionInfo.FIRST_META_REGIONINFO,
+        UTIL.getMetaTableDescriptor());
     try {
     // Write rows for three tables 'A', 'B', and 'C'.
     for (char c = 'A'; c < 'D'; c++) {
@@ -144,7 +137,7 @@ public class TestGetClosestAtOrBefore  {
     findRow(mr, 'C', 46, -1);
     findRow(mr, 'C', 43, -1);
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(mr);
+      UTIL.destroyRegion(mr);
     }
   }
 
@@ -162,9 +155,7 @@ public class TestGetClosestAtOrBefore  {
     TableName tableb = TableName.valueOf("" + table);
     // Find the row.
     byte [] tofindBytes = Bytes.toBytes((short)rowToFind);
-    byte [] metaKey = HRegionInfo.createRegionName(
-        tableb, tofindBytes,
-      HConstants.NINES, false);
+    byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes, HConstants.NINES, false);
     LOG.info("find=" + new String(metaKey));
     Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY);
     if (answer == -1) {
@@ -291,9 +282,7 @@ public class TestGetClosestAtOrBefore  {
     } finally {
       if (region != null) {
         try {
-          WAL wal = ((HRegion)region).getWAL();
-          ((HRegion)region).close();
-          wal.close();
+          UTIL.destroyRegion(region);
         } catch (Exception e) {
           e.printStackTrace();
         }
@@ -349,9 +338,7 @@ public class TestGetClosestAtOrBefore  {
     } finally {
       if (region != null) {
         try {
-          WAL wal = ((HRegion)region).getWAL();
-          ((HRegion)region).close();
-          wal.close();
+          UTIL.destroyRegion(region);
         } catch (Exception e) {
           e.printStackTrace();
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index c9b4217..cbb79ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.protobuf.ByteString;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.security.PrivilegedExceptionAction;
@@ -31,7 +29,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import java.util.UUID;
@@ -49,8 +46,6 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ArrayBackedTag;
@@ -102,17 +97,10 @@ import org.apache.hadoop.hbase.filter.NullComparator;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.ServerProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
@@ -120,12 +108,10 @@ import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem;
 import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -139,14 +125,11 @@ import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.FaultyFSLog;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
-import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -252,7 +235,6 @@ public class TestHRegion {
 
   /**
    * Test that I can use the max flushed sequence id after the close.
-   * @throws IOException
    */
   @Test
   public void testSequenceId() throws IOException {
@@ -287,7 +269,6 @@ public class TestHRegion {
    * case previous flush fails and leaves some data in snapshot. The bug could cause loss of data
    * in current memstore. The fix is removing all conditions except abort check so we ensure 2
    * flushes for region close."
-   * @throws IOException
    */
   @Test
   public void testCloseCarryingSnapshot() throws IOException {
@@ -475,7 +456,6 @@ public class TestHRegion {
    * much smaller than expected. In extreme case, if the error accumulates to even bigger than
    * HRegion's memstore size limit, any further flush is skipped because flush does not do anything
    * if memstoreSize is not larger than 0."
-   * @throws Exception
    */
   @Test
   public void testFlushSizeAccounting() throws Exception {
@@ -4251,7 +4231,7 @@ public class TestHRegion {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    this.region = TEST_UTIL.createLocalHRegion(info, htd);
+    Region region = TEST_UTIL.createLocalHRegion(info, htd);
     try {
       int num_unique_rows = 10;
       int duplicate_multiplier = 2;
@@ -4293,8 +4273,7 @@ public class TestHRegion {
         assertEquals(num_unique_rows, reader.getFilterEntries());
       }
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -4309,7 +4288,7 @@ public class TestHRegion {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    this.region = TEST_UTIL.createLocalHRegion(info, htd);
+    Region region = TEST_UTIL.createLocalHRegion(info, htd);
     try {
       // For row:0, col:0: insert versions 1 through 5.
       byte row[] = Bytes.toBytes("row:" + 0);
@@ -4336,8 +4315,7 @@ public class TestHRegion {
       checkOneCell(kvs[2], FAMILY, 0, 0, 2);
       checkOneCell(kvs[3], FAMILY, 0, 0, 1);
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -4357,7 +4335,7 @@ public class TestHRegion {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    this.region = TEST_UTIL.createLocalHRegion(info, htd);
+    Region region = TEST_UTIL.createLocalHRegion(info, htd);
     try {
       // Insert some data
       byte row[] = Bytes.toBytes("row1");
@@ -4379,8 +4357,7 @@ public class TestHRegion {
       Cell[] keyValues = region.get(get).rawCells();
       assertTrue(keyValues.length == 0);
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -4993,14 +4970,6 @@ public class TestHRegion {
 //    }
 //  }
 
-  static WALFactory createWALFactory(Configuration conf, Path rootDir) throws IOException {
-    Configuration confForWAL = new Configuration(conf);
-    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
-    return new WALFactory(confForWAL,
-        Collections.<WALActionsListener>singletonList(new MetricsWAL()),
-        "hregion-" + RandomStringUtils.randomNumeric(8));
-  }
-
 //  @Test
 //  public void testCompactionFromPrimary() throws IOException {
 //    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
@@ -5203,7 +5172,7 @@ public class TestHRegion {
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
       byte[]... families) throws IOException {
@@ -5212,7 +5181,7 @@ public class TestHRegion {
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
       boolean isReadOnly, byte[]... families) throws IOException {
@@ -5231,7 +5200,7 @@ public class TestHRegion {
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
@@ -6042,9 +6011,7 @@ public class TestHRegion {
       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
 
     // open the region w/o rss and wal and flush some files
-    HRegion region =
-         HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-             .getConfiguration(), htd);
+    Region region = TEST_UTIL.createLocalHRegion(hri, htd);
     assertNotNull(region);
 
     // create a file in fam1 for the region before opening in OpenRegionHandler
@@ -6094,7 +6061,7 @@ public class TestHRegion {
       assertEquals(0, store.getStoreFileCount()); // no store files
 
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -6112,21 +6079,23 @@ public class TestHRegion {
 
   @Test
   public void testFlushedFileWithNoTags() throws Exception {
-    TableName tableName = TableName.valueOf(getClass().getSimpleName());
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(fam1));
-    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    Path path = TEST_UTIL.getDataTestDir(getClass().getSimpleName());
-    region = HBaseTestingUtility.createRegionAndWAL(info, path, TEST_UTIL.getConfiguration(), htd);
-    Put put = new Put(Bytes.toBytes("a-b-0-0"));
-    put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
-    region.put(put);
-    region.flush(true);
-    Store store = region.getStore(fam1);
-    Collection<StoreFile> storefiles = store.getStorefiles();
-    for (StoreFile sf : storefiles) {
-      assertFalse("Tags should not be present "
-          ,sf.getReader().getHFileReader().getFileContext().isIncludesTags());
+    TableName tableName = TableName.valueOf(getClass().getSimpleName());
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(fam1));
+    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    Region region = TEST_UTIL.createLocalHRegion(info, htd);
+    try {
+      Put put = new Put(Bytes.toBytes("a-b-0-0"));
+      put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
+      region.put(put);
+      region.flush(true);
+      Store store = region.getStore(fam1);
+      Collection<StoreFile> storefiles = store.getStorefiles();
+      for (StoreFile sf : storefiles) {
+        assertFalse("Tags should not be present ", sf.getReader().getHFileReader().getFileContext().isIncludesTags());
+      }
+    } finally {
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -6146,9 +6115,7 @@ public class TestHRegion {
       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
 
     // open the region w/o rss and wal and flush some files
-    HRegion region =
-         HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-             .getConfiguration(), htd);
+    Region region = TEST_UTIL.createLocalHRegion(hri, htd);
     assertNotNull(region);
 
     // create a file in fam1 for the region before opening in OpenRegionHandler
@@ -6215,15 +6182,13 @@ public class TestHRegion {
       assertEquals(0, store.getStoreFileCount()); // no store files
 
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
   /**
    * Utility method to setup a WAL mock.
    * Needs to do the bit where we close latch on the WALKey on append else test hangs.
-   * @return
-   * @throws IOException
    */
   private WAL mockWAL() throws IOException {
     WAL wal = mock(WAL.class);
@@ -6373,9 +6338,9 @@ public class TestHRegion {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
 
-    HRegion region = HBaseTestingUtility.createRegionAndWAL(new HRegionInfo(htd.getTableName(),
+    Region region = TEST_UTIL.createLocalHRegion(new HRegionInfo(htd.getTableName(),
             HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY),
-        TEST_UTIL.getDataTestDir(), conf, htd);
+        htd, conf);
     assertNotNull(region);
     try {
       long now = EnvironmentEdgeManager.currentTime();
@@ -6452,7 +6417,7 @@ public class TestHRegion {
       // Increment with a TTL of 5 seconds
       Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
       incr.setTTL(5000);
-      region.increment(incr); // 2
+      region.increment(incr, HConstants.NO_NONCE, HConstants.NO_NONCE); // 2
 
       // New value should be 2
       r = region.get(new Get(row));
@@ -6477,7 +6442,7 @@ public class TestHRegion {
       assertNull(r.getValue(fam1, q1));
 
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      TEST_UTIL.destroyRegion(region);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index de561d5..1370143 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -41,11 +41,13 @@ import com.google.protobuf.ByteString;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.UUID;
 
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -80,6 +82,8 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
 import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -119,14 +123,12 @@ public class TestHRegionReplayEvents {
       Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")};
 
   // Test names
-  protected byte[] tableName;
-  protected String method;
+  protected TableName tableName;
   protected final byte[] row = Bytes.toBytes("rowA");
   protected final byte[] row2 = Bytes.toBytes("rowB");
   protected byte[] cq = Bytes.toBytes("cq");
 
   // per test fields
-  private Path rootDir;
   private HTableDescriptor htd;
   private long time;
   private RegionServerServices rss;
@@ -140,14 +142,10 @@ public class TestHRegionReplayEvents {
   public void setup() throws IOException {
     TEST_UTIL = HBaseTestingUtility.createLocalHTU();
     CONF = TEST_UTIL.getConfiguration();
-    dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString();
-    method = name.getMethodName();
-    tableName = Bytes.toBytes(name.getMethodName());
-    rootDir = new Path(dir + method);
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
-    method = name.getMethodName();
-
-    htd = new HTableDescriptor(TableName.valueOf(method));
+    TEST_UTIL.createRootDir();
+    tableName = TableName.valueOf(name.getMethodName());
+
+    htd = new HTableDescriptor(tableName);
     for (byte[] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
@@ -161,7 +159,9 @@ public class TestHRegionReplayEvents {
       HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
       false, time, 1);
 
-    wals = TestHRegion.createWALFactory(CONF, rootDir);
+    wals = new WALFactory(CONF,
+        Collections.singletonList(new MetricsWAL()),
+        "hregion-" + RandomStringUtils.randomNumeric(8));
     walPrimary = wals.getWAL(primaryHri.getEncodedNameAsBytes(),
         primaryHri.getTable().getNamespace());
     walSecondary = wals.getWAL(secondaryHri.getEncodedNameAsBytes(),
@@ -178,13 +178,13 @@ public class TestHRegionReplayEvents {
       string+"-"+string, 1);
     when(rss.getExecutorService()).thenReturn(es);
 
-//    primaryRegion = HRegion.createHRegion(CONF, rootDir, htd, primaryHri, walPrimary);
+    primaryRegion = HRegion.createHRegion(CONF, htd, primaryHri, walPrimary);
     primaryRegion.close();
     List<Region> regions = new ArrayList<Region>();
     regions.add(primaryRegion);
     when(rss.getOnlineRegions()).thenReturn(regions);
 
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
     secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null);
 
     reader = null;
@@ -197,10 +197,10 @@ public class TestHRegionReplayEvents {
     }
 
     if (primaryRegion != null) {
-      HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
+      TEST_UTIL.destroyRegion(primaryRegion);
     }
     if (secondaryRegion != null) {
-      HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
+      TEST_UTIL.destroyRegion(secondaryRegion);
     }
 
     EnvironmentEdgeManagerTestHelper.reset();
@@ -208,10 +208,6 @@ public class TestHRegionReplayEvents {
     TEST_UTIL.cleanupTestDir();
   }
 
-  String getName() {
-    return name.getMethodName();
-  }
-
   // Some of the test cases are as follows:
   // 1. replay flush start marker again
   // 2. replay flush with smaller seqId than what is there in memstore snapshot
@@ -824,7 +820,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -904,7 +900,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -983,7 +979,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -1082,11 +1078,8 @@ public class TestHRegionReplayEvents {
   public void testSeqIdsFromReplay() throws IOException {
     // test the case where seqId's coming from replayed WALEdits are made persisted with their
     // original seqIds and they are made visible through mvcc read point upon replay
-    String method = name.getMethodName();
-    byte[] tableName = Bytes.toBytes(method);
     byte[] family = Bytes.toBytes("family");
-
-    HRegion region = initHRegion(tableName, method, family);
+    HRegion region = initHRegion(tableName, family);
     try {
       // replay an entry that is bigger than current read point
       long readPoint = region.getMVCC().getReadPoint();
@@ -1327,7 +1320,7 @@ public class TestHRegionReplayEvents {
     disableReads(secondaryRegion);
 
     primaryRegion.close();
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
 
     reader = createWALReaderForPrimary();
     while (true) {
@@ -1349,6 +1342,7 @@ public class TestHRegionReplayEvents {
   }
 
   @Test
+  // TODO(fsredo): fix this test.
   public void testRefreshStoreFiles() throws IOException {
     assertEquals(0, primaryRegion.getStoreFileList(families).size());
     assertEquals(0, secondaryRegion.getStoreFileList(families).size());
@@ -1364,7 +1358,7 @@ public class TestHRegionReplayEvents {
     // refresh the store file list, and ensure that the files are picked up.
     secondaryRegion.refreshStoreFiles();
     assertPathListsEqual(primaryRegion.getStoreFileList(families),
-      secondaryRegion.getStoreFileList(families));
+        secondaryRegion.getStoreFileList(families));
     assertEquals(families.length, secondaryRegion.getStoreFileList(families).size());
 
     LOG.info("-- Verifying edits from secondary");
@@ -1477,7 +1471,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+    primaryRegion = HRegion.openHRegion(primaryHri, htd, walPrimary, CONF, rss, null);
 
     // bulk load a file into primary region
     Random random = new Random();
@@ -1660,16 +1654,9 @@ public class TestHRegionReplayEvents {
     }
   }
 
-  private static HRegion initHRegion(byte[] tableName,
-      String callingMethod, byte[]... families) throws IOException {
-    return initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      callingMethod, TEST_UTIL.getConfiguration(), false, Durability.SYNC_WAL, null, families);
-  }
-
-  private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
-      String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
-      WAL wal, byte[]... families) throws IOException {
-    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf,
-      isReadOnly, durability, wal, families);
+  private static HRegion initHRegion(TableName tableName, byte[]... families) throws IOException {
+    return TEST_UTIL.createLocalHRegion(tableName, HConstants.EMPTY_START_ROW,
+        HConstants.EMPTY_END_ROW, false, Durability.SYNC_WAL, null,
+        families);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
index d66899b..d705f3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
@@ -62,7 +62,7 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion{
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   @Override
   public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
@@ -78,8 +78,6 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion{
 
   /**
    * Splits twice and verifies getting from each of the split regions.
-   *
-   * @throws Exception
    */
   @Override
   public void testBasicSplit() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 3ef89ad..5a96c22 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -110,16 +110,13 @@ public class TestMajorCompaction {
 
   @After
   public void tearDown() throws Exception {
-    WAL wal = ((HRegion)r).getWAL();
-    ((HRegion)r).close();
-    wal.close();
+    UTIL.destroyRegion(r);
   }
 
   /**
    * Test that on a major compaction, if all cells are expired or deleted, then
    * we'll end up with no product.  Make sure scanner over region returns
    * right answer in this case - and that it just basically works.
-   * @throws IOException
    */
   @Test
   public void testMajorCompactingToNoOutput() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
index 1bd20c6..d6084c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
@@ -90,9 +90,7 @@ public class TestMinorCompaction {
 
   @After
   public void tearDown() throws Exception {
-    WAL wal = ((HRegion)r).getWAL();
-    ((HRegion)r).close();
-    wal.close();
+    UTIL.destroyRegion(r);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 6bfaa59..ce759fd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -21,7 +21,6 @@ import com.google.common.hash.Hashing;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -69,8 +68,6 @@ public class TestPerColumnFamilyFlush {
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
-  private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion");
-
   public static final TableName TABLENAME = TableName.valueOf("TestPerColumnFamilyFlush", "t1");
 
   public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"),
@@ -82,14 +79,13 @@ public class TestPerColumnFamilyFlush {
 
   public static final byte[] FAMILY3 = FAMILIES[2];
 
-  private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
+  private HRegion initHRegion(Configuration conf) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
       htd.addFamily(new HColumnDescriptor(family));
     }
     HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
-    Path path = new Path(DIR, callingMethod);
-    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    return TEST_UTIL.createLocalHRegion(info, htd, conf);
   }
 
   // A helper function to create puts.
@@ -130,7 +126,7 @@ public class TestPerColumnFamilyFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
       100 * 1024);
     // Intialize the region
-    Region region = initHRegion("testSelectiveFlushWhenEnabled", conf);
+    Region region = initHRegion(conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));
@@ -273,7 +269,7 @@ public class TestPerColumnFamilyFlush {
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllStoresPolicy.class.getName());
 
     // Intialize the HRegion
-    HRegion region = initHRegion("testSelectiveFlushWhenNotEnabled", conf);
+    HRegion region = initHRegion(conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
index 1583bf8..c04a564 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Scan;
@@ -83,9 +84,8 @@ public class TestRegionIncrement {
   private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
     WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
       TEST_UTIL.getDataTestDir().toString(), conf);
-    return (HRegion)TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
-      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
-      false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
+    return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_BYTE_ARRAY,
+        HConstants.EMPTY_BYTE_ARRAY, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
   }
 
   private void closeRegion(final HRegion region) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index 57d9365..1c77b79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.fs.RegionStorage;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -81,38 +82,28 @@ public class TestRegionMergeTransaction {
 
   @Before
   public void setup() throws IOException {
-    this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    this.fs.delete(this.testdir, true);
+    fs = FileSystem.get(TEST_UTIL.getConfiguration());
     final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
     FSUtils.setRootDir(walConf, this.testdir);
     this.wals = new WALFactory(walConf, null, TestRegionMergeTransaction.class.getName());
-    this.region_a = createRegion(this.testdir, this.wals, STARTROW_A, STARTROW_B);
-    this.region_b = createRegion(this.testdir, this.wals, STARTROW_B, STARTROW_C);
-    this.region_c = createRegion(this.testdir, this.wals, STARTROW_C, ENDROW);
+    this.region_a = createRegion(STARTROW_A, STARTROW_B);
+    this.region_b = createRegion(STARTROW_B, STARTROW_C);
+    this.region_c = createRegion(STARTROW_C, ENDROW);
     assert region_a != null && region_b != null && region_c != null;
     TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
   }
 
   @After
   public void teardown() throws IOException {
-//    for (HRegion region : new HRegion[] { region_a, region_b, region_c }) {
-//      if (region != null && !region.isClosed()) region.close();
-//      if (this.fs.exists(region.getRegionStorage().getRegionDir())
-//          && !this.fs.delete(region.getRegionStorage().getRegionDir(), true)) {
-//        throw new IOException("Failed deleting of "
-//            + region.getRegionStorage().getRegionDir());
-//      }
-//    }
-//    if (this.wals != null) {
-//      this.wals.close();
-//    }
-//    this.fs.delete(this.testdir, true);
+    TEST_UTIL.destroyRegion(region_a);
+    TEST_UTIL.destroyRegion(region_b);
+    TEST_UTIL.destroyRegion(region_c);
+    TEST_UTIL.cleanupTestDir();
   }
 
   /**
    * Test straight prepare works. Tries to merge on {@link #region_a} and
    * {@link #region_b}
-   * @throws IOException
    */
   @Test
   public void testPrepare() throws IOException {
@@ -258,7 +249,7 @@ public class TestRegionMergeTransaction {
     Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
     HRegion mergedRegion = (HRegion)mt.execute(mockServer, null);
     // Do some assertions about execution.
-    // TODO move tests to rely on RegionStorage
+    // TODO(fsredo) move tests to rely on RegionStorage
     final Path mergesdir = ((LegacyPathIdentifier)mt.getMergesDir()).path;
     assertTrue(this.fs.exists(mergesdir));
     // Assert region_a and region_b is closed.
@@ -379,11 +370,8 @@ public class TestRegionMergeTransaction {
     // Make sure that merged region is still in the filesystem, that
     // they have not been removed; this is supposed to be the case if we go
     // past point of no return.
-//    Path tableDir = this.region_a.getRegionStorage().getRegionDir()
-//        .getParent();
-//    Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo()
-//        .getEncodedName());
-//    assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir));
+    assertTrue(RegionStorage.open(TEST_UTIL.getConfiguration(), mt.getMergedRegionInfo(), false)
+        .exists());
   }
 
   @Test
@@ -435,21 +423,17 @@ public class TestRegionMergeTransaction {
   private class MockedFailedMergedRegionOpen extends IOException {
   }
 
-  private HRegion createRegion(final Path testdir, final WALFactory wals,
-      final byte[] startrow, final byte[] endrow)
-      throws IOException {
+  private HRegion createRegion(final byte[] startrow, final byte[] endrow) throws IOException {
     // Make a region with start and end keys.
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
     HColumnDescriptor hcd = new HColumnDescriptor(CF);
     htd.addFamily(hcd);
     HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow);
-    HRegion a = HBaseTestingUtility.createRegionAndWAL(hri, testdir,
-        TEST_UTIL.getConfiguration(), htd);
+    Region a = TEST_UTIL.createLocalHRegion(hri, htd);
     HBaseTestingUtility.closeRegionAndWAL(a);
-//    return HRegion.openHRegion(testdir, hri, htd,
-//      wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
-//      TEST_UTIL.getConfiguration());
-    return null;
+    return HRegion.openHRegion(hri, htd,
+      wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
+      TEST_UTIL.getConfiguration());
   }
 
   private int countRows(final HRegion r) throws IOException {
@@ -472,11 +456,8 @@ public class TestRegionMergeTransaction {
   /**
    * Load region with rows from 'aaa' to 'zzz', skip the rows which are out of
    * range of the region
-   * @param r Region
-   * @param f Family
    * @param flush flush the cache if true
    * @return Count of rows loaded.
-   * @throws IOException
    */
   private int loadRegion(final HRegion r, final byte[] f, final boolean flush)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
index 3e02243..fa8f681 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
@@ -41,7 +41,6 @@ public class TestResettingCounters {
 
   @Test
   public void testResettingCounters() throws Exception {
-
     HBaseTestingUtility htu = new HBaseTestingUtility();
     Configuration conf = htu.getConfiguration();
     FileSystem fs = FileSystem.get(conf);
@@ -62,14 +61,7 @@ public class TestResettingCounters {
     for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));
 
     HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
-    String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
-    Path path = new Path(testDir);
-    if (fs.exists(path)) {
-      if (!fs.delete(path, true)) {
-        throw new IOException("Failed delete of " + path);
-      }
-    }
-    Region region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, htd);
+    Region region = htu.createLocalHRegion(hri, htd);
     try {
       Increment odd = new Increment(rows[0]);
       odd.setDurability(Durability.SKIP_WAL);
@@ -100,9 +92,8 @@ public class TestResettingCounters {
         assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
       }
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      htu.destroyRegion(region);
     }
-    HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
index 4d3a1c3..a4470b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java
@@ -19,7 +19,6 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -40,22 +39,20 @@ import java.io.IOException;
  */
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestRowTooBig {
-  private final static HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
-  private static Path rootRegionDir;
+  private final static HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
   private static final HTableDescriptor TEST_HTD =
     new HTableDescriptor(TableName.valueOf(TestRowTooBig.class.getSimpleName()));
 
   @BeforeClass
   public static void before() throws Exception {
-    HTU.startMiniCluster();
-    HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY,
+    TEST_UTIL.startMiniCluster();
+    TEST_UTIL.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY,
       10 * 1024 * 1024L);
-    rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
   }
 
   @AfterClass
   public static void after() throws Exception {
-    HTU.shutdownMiniCluster();
+    TEST_UTIL.shutdownMiniCluster();
   }
 
   /**
@@ -67,7 +64,6 @@ public class TestRowTooBig {
    * OOME happened before we actually get to reading results, but
    * during seeking, as each StoreFile gets it's own scanner,
    * and each scanner seeks after the first KV.
-   * @throws IOException
    */
   @Test(expected = RowTooBigException.class)
   public void testScannersSeekOnFewLargeCells() throws IOException {
@@ -85,8 +81,7 @@ public class TestRowTooBig {
     final HRegionInfo hri =
       new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
         HConstants.EMPTY_END_ROW);
-    Region region =
-        HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
+    Region region = TEST_UTIL.createLocalHRegion(hri, htd);
     try {
       // Add 5 cells to memstore
       for (int i = 0; i < 5 ; i++) {
@@ -101,7 +96,7 @@ public class TestRowTooBig {
       Get get = new Get(row1);
       region.get(get);
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      TEST_UTIL.destroyRegion(region);
     }
   }
 
@@ -113,8 +108,6 @@ public class TestRowTooBig {
    *  - try to Get whole row.
    *
    *  OOME happened in StoreScanner.next(..).
-   *
-   * @throws IOException
    */
   @Test(expected = RowTooBigException.class)
   public void testScanAcrossManySmallColumns() throws IOException {
@@ -132,8 +125,7 @@ public class TestRowTooBig {
     final HRegionInfo hri =
       new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
         HConstants.EMPTY_END_ROW);
-    Region region =
-        HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
+    Region region = TEST_UTIL.createLocalHRegion(hri, htd);
     try {
       // Add to memstore
       for (int i = 0; i < 10; i++) {
@@ -150,7 +142,7 @@ public class TestRowTooBig {
       Get get = new Get(row1);
       region.get(get);
     } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
+      TEST_UTIL.destroyRegion(region);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index 1b42754..4daa43b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -104,7 +104,7 @@ public class TestScanner {
 
   private static final long START_CODE = Long.MAX_VALUE;
 
-  private HRegion region;
+  private Region region;
 
   private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
   final private byte[] col1;
@@ -123,7 +123,6 @@ public class TestScanner {
 
   /**
    * Test basic stop row filter works.
-   * @throws Exception
    */
   @Test
   public void testStopRow() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index e9bb468..bebdf1a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -289,7 +289,7 @@ public class TestWALLockup {
 
   /**
    * @return A region on which you must call
-   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
+   *         {@link HBaseTestingUtility#closeRegionAndWAL(Region)} when done.
    */
   public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, WAL wal)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 74826b0..c85c539 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -63,7 +63,7 @@ public class TestWalAndCompactingMemStoreFlush {
   public static final byte[] FAMILY2 = FAMILIES[1];
   public static final byte[] FAMILY3 = FAMILIES[2];
 
-  private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
+  private HRegion initHRegion(Configuration conf) throws IOException {
     int i=0;
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
@@ -75,8 +75,7 @@ public class TestWalAndCompactingMemStoreFlush {
     }
 
     HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
-    Path path = new Path(DIR, callingMethod);
-    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    return TEST_UTIL.createLocalHRegion(info, htd, conf);
   }
 
   // A helper function to create puts.
@@ -132,7 +131,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
 
     // Intialize the region
-    Region region = initHRegion("testSelectiveFlushWhenEnabled", conf);
+    Region region = initHRegion(conf);
 
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
@@ -385,7 +384,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setInt("hbase.hregion.compacting.memstore.type",1);
 
     // Intialize the region
-    Region region = initHRegion("testSelectiveFlushWhenEnabled", conf);
+    Region region = initHRegion(conf);
 
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
@@ -629,7 +628,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
 
     // Intialize the HRegion
-    HRegion region = initHRegion("testSelectiveFlushWhenNotEnabled", conf);
+    HRegion region = initHRegion(conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1813a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 3420635..985f884 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -164,8 +165,6 @@ public abstract class AbstractTestFSWAL {
 
   /**
    * helper method to simulate region flush for a WAL.
-   * @param wal
-   * @param regionEncodedName
    */
   protected void flushRegion(WAL wal, byte[] regionEncodedName, Set<byte[]> flushedFamilyNames) {
     wal.startCacheFlush(regionEncodedName, flushedFamilyNames);
@@ -175,7 +174,6 @@ public abstract class AbstractTestFSWAL {
   /**
    * tests the log comparator. Ensure that we are not mixing meta logs with non-meta logs (throws
    * exception if we do). Comparison is based on the timestamp present in the wal name.
-   * @throws Exception
    */
   @Test
   public void testWALComparator() throws Exception {
@@ -343,7 +341,6 @@ public abstract class AbstractTestFSWAL {
    * slowing appends in the background ring buffer thread while in foreground we call flush. The
    * addition of the sync over HRegion in flush should fix an issue where flush was returning before
    * all of its appends had made it out to the WAL (HBASE-11109).
-   * @throws IOException
    * @see <a href="https://issues.apache.org/jira/browse/HBASE-11109">HBASE-11109</a>
    */
   @Test
@@ -354,8 +351,7 @@ public abstract class AbstractTestFSWAL {
     final byte[] rowName = tableName.getName();
     final HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("f"));
-    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(),
-      TEST_UTIL.getConfiguration(), htd);
+    Region r = TEST_UTIL.createLocalHRegion(hri, htd);
     HBaseTestingUtility.closeRegionAndWAL(r);
     final int countPerFamily = 10;
     final AtomicBoolean goslow = new AtomicBoolean(false);
@@ -375,9 +371,7 @@ public abstract class AbstractTestFSWAL {
           }
         }
       });
-//    HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
-//      TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
-    HRegion region = null;
+    HRegion region = HRegion.openHRegion(hri, htd, wal, TEST_UTIL.getConfiguration());
     EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
     try {
       List<Put> puts = null;


Mime
View raw message