hbase-commits mailing list archives

From: st...@apache.org
Subject: [05/12] hbase git commit: HBASE-10378 Refactor write-ahead-log implementation -- ADDEDNUM
Date: Tue, 18 Nov 2014 20:12:59 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index ca95c4a..58c61c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -202,12 +202,12 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
-  public long getNumHLogFiles() {
+  public long getNumWALFiles() {
     return 10;
   }
 
   @Override
-  public long getHLogFileSize() {
+  public long getWALFileSize() {
     return 1024000;
   }
 
@@ -250,4 +250,4 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   public long getHedgedReadWins() {
     return 10;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 86cca00..a4b4959 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -60,7 +60,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -607,7 +607,7 @@ public class TestAtomicOperation {
 
   public static class MockHRegion extends HRegion {
 
-    public MockHRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
+    public MockHRegion(Path tableDir, WAL log, FileSystem fs, Configuration conf,
         final HRegionInfo regionInfo, final HTableDescriptor htd, RegionServerServices rsServices) {
       super(tableDir, log, fs, conf, regionInfo, htd, rsServices);
     }
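
The change above is the mechanical core of the refactor, repeated throughout the patch: the HLog interface in org.apache.hadoop.hbase.regionserver.wal is replaced one-for-one by WAL in the new org.apache.hadoop.hbase.wal package, in both imports and method signatures.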

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 1f63a48..dc142d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -51,9 +51,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -126,7 +127,7 @@ public class TestCacheOnWriteInSchema {
   private final String testDescription;
   private HRegion region;
   private HStore store;
-  private HLog hlog;
+  private WALFactory walFactory;
   private FileSystem fs;
 
   public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
@@ -165,15 +166,16 @@ public class TestCacheOnWriteInSchema {
     htd.addFamily(hcd);
 
     // Create a store based on the schema
-    Path basedir = new Path(DIR);
-    String logName = "logs";
-    Path logdir = new Path(DIR, logName);
+    final String id = TestCacheOnWriteInSchema.class.getName();
+    final Path logdir = new Path(FSUtils.getRootDir(conf),
+        DefaultWALProvider.getWALDirectoryName(id));
     fs.delete(logdir, true);
 
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
+    walFactory = new WALFactory(conf, null, id);
 
-    region = TEST_UTIL.createLocalHRegion(info, htd, hlog);
+    region = TEST_UTIL.createLocalHRegion(info, htd,
+        walFactory.getWAL(info.getEncodedNameAsBytes()));
     store = new HStore(region, hcd, conf);
   }
 
@@ -187,7 +189,7 @@ public class TestCacheOnWriteInSchema {
       ex = e;
     }
     try {
-      hlog.closeAndDelete();
+      walFactory.close();
     } catch (IOException e) {
       LOG.warn("Caught Exception", e);
       ex = e;
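
This file establishes the setup/teardown pattern the rest of the patch repeats: instead of creating an HLog through HLogFactory, tests build a WALFactory and fetch per-region WALs from it, and teardown closes the factory instead of calling closeAndDelete(). A condensed sketch of the idiom, assuming conf, id, and info are in scope as in the setup above (later hunks additionally copy the Configuration and call FSUtils.setRootDir on the copy to redirect WAL placement):

    // One factory per test; the id becomes part of the WAL directory name.
    final WALFactory wals = new WALFactory(conf, null, id);
    final WAL wal = wals.getWAL(info.getEncodedNameAsBytes());
    // ... exercise the region/store against 'wal' ...
    wals.close(); // replaces hlog.closeAndDelete()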

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 4900ef4..7cfa475 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
@@ -118,9 +118,9 @@ public class TestCompaction {
 
   @After
   public void tearDown() throws Exception {
-    HLog hlog = r.getLog();
+    WAL wal = r.getWAL();
     this.r.close();
-    hlog.closeAndDelete();
+    wal.close();
   }
 
   /**
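
The tearDown() change above recurs verbatim in TestMajorCompaction and TestMinorCompaction below: the region's WAL is obtained via getWAL() and shut down with close(). The old closeAndDelete() combined shutdown with file removal; under the new API the factory/provider owns the files, so these tests only close (an inference from this patch, not a documented contract).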

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index a78e1d5..43bc9f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
@@ -65,7 +65,7 @@ public class TestDefaultCompactSelection extends TestCase {
   protected static final long minSize = 10;
   protected static final long maxSize = 2100;
 
-  private HLog hlog;
+  private WALFactory wals;
   private HRegion region;
 
   @Override
@@ -82,9 +82,9 @@ public class TestDefaultCompactSelection extends TestCase {
     this.conf.unset("hbase.hstore.compaction.min.size");
 
     //Setting up a Store
+    final String id = TestDefaultCompactSelection.class.getName();
     Path basedir = new Path(DIR);
-    String logName = "logs";
-    Path logdir = new Path(DIR, logName);
+    final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
     FileSystem fs = FileSystem.get(conf);
 
@@ -94,11 +94,14 @@ public class TestDefaultCompactSelection extends TestCase {
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
 
-    hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
+    final Configuration walConf = new Configuration(conf);
+    FSUtils.setRootDir(walConf, basedir);
+    wals = new WALFactory(walConf, null, id);
     region = HRegion.createHRegion(info, basedir, conf, htd);
     HRegion.closeHRegion(region);
     Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
-    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+    region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
+        null);
 
     store = new HStore(region, hcd, conf);
 
@@ -116,7 +119,7 @@ public class TestDefaultCompactSelection extends TestCase {
       ex = e;
     }
     try {
-      hlog.closeAndDelete();
+      wals.close();
     } catch (IOException e) {
       LOG.warn("Caught Exception", e);
       ex = e;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index b270a7d..92351f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -142,7 +142,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
         } catch (Exception e) {
           e.printStackTrace();
         }
-        mr.getLog().closeAndDelete();
+        mr.getWAL().close();
       }
     }
   }
@@ -293,7 +293,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
         } catch (Exception e) {
           e.printStackTrace();
         }
-        region.getLog().closeAndDelete();
+        region.getWAL().close();
       }
     }
   }
@@ -349,7 +349,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
         } catch (Exception e) {
           e.printStackTrace();
         }
-        region.getLog().closeAndDelete();
+        region.getWAL().close();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index b4e94bf..f7936c3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -124,13 +124,17 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescripto
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
 import org.apache.hadoop.hbase.regionserver.HRegion.RowLock;
 import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem;
-import org.apache.hadoop.hbase.regionserver.wal.FaultyHLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.FaultyFSLog;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WALProvider;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -254,9 +258,9 @@ public class TestHRegion {
    */
   @Test (timeout=60000)
   public void testMemstoreSnapshotSize() throws IOException {
-    class MyFaultyHLog extends FaultyHLog {
+    class MyFaultyFSLog extends FaultyFSLog {
       StoreFlushContext storeFlushCtx;
-      public MyFaultyHLog(FileSystem fs, Path rootDir, String logName, Configuration conf)
+      public MyFaultyFSLog(FileSystem fs, Path rootDir, String logName, Configuration conf)
           throws IOException {
         super(fs, rootDir, logName, conf);
       }
@@ -274,7 +278,7 @@ public class TestHRegion {
 
     FileSystem fs = FileSystem.get(CONF);
     Path rootDir = new Path(dir + "testMemstoreSnapshotSize");
-    MyFaultyHLog faultyLog = new MyFaultyHLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
+    MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
     HRegion region = initHRegion(tableName, null, null, name.getMethodName(),
       CONF, false, Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES);
 
@@ -285,7 +289,7 @@ public class TestHRegion {
 
     Put put = new Put(value);
     put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
-    faultyLog.setFailureType(FaultyHLog.FailureType.SYNC);
+    faultyLog.setFailureType(FaultyFSLog.FailureType.SYNC);
 
     boolean threwIOE = false;
     try {
@@ -512,12 +516,13 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, CONF, family);
+    final WALFactory wals = new WALFactory(CONF, null, method);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
 
       long maxSeqId = 1050;
       long minSeqId = 1000;
@@ -525,13 +530,13 @@ public class TestHRegion {
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);
+        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
         edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
             .toBytes(i)));
-        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
+        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
             HConstants.DEFAULT_CLUSTER_ID), edit));
 
         writer.close();
@@ -554,6 +559,7 @@ public class TestHRegion {
     } finally {
       HRegion.closeHRegion(this.region);
       this.region = null;
+      wals.close();
     }
   }
 
@@ -563,12 +569,13 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, CONF, family);
+    final WALFactory wals = new WALFactory(CONF, null, method);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
 
       long maxSeqId = 1050;
       long minSeqId = 1000;
@@ -576,13 +583,13 @@ public class TestHRegion {
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);
+        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
         edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
             .toBytes(i)));
-        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
+        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
             HConstants.DEFAULT_CLUSTER_ID), edit));
 
         writer.close();
@@ -610,6 +617,7 @@ public class TestHRegion {
     } finally {
       HRegion.closeHRegion(this.region);
       this.region = null;
+      wals.close();
     }
   }
 
@@ -621,7 +629,7 @@ public class TestHRegion {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
       for (int i = 1000; i < 1050; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         FSDataOutputStream dos = fs.create(recoveredEdits);
@@ -651,6 +659,7 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, CONF, family);
+    final WALFactory wals = new WALFactory(CONF, null, method);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
@@ -659,7 +668,7 @@ public class TestHRegion {
       assertEquals(0, region.getStoreFileList(
         region.getStores().keySet().toArray(new byte[0][])).size());
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
 
       long maxSeqId = 1050;
       long minSeqId = 1000;
@@ -667,7 +676,7 @@ public class TestHRegion {
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);
+        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
 
         long time = System.nanoTime();
         WALEdit edit = null;
@@ -685,7 +694,7 @@ public class TestHRegion {
           edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
             .toBytes(i)));
         }
-        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
+        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
             HConstants.DEFAULT_CLUSTER_ID), edit));
         writer.close();
       }
@@ -706,7 +715,9 @@ public class TestHRegion {
     } finally {
       HRegion.closeHRegion(this.region);
       this.region = null;
-    }  }
+      wals.close();
+    }
+  }
 
   @Test
   public void testRecoveredEditsReplayCompaction() throws Exception {
@@ -714,6 +725,7 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     this.region = initHRegion(tableName, method, CONF, family);
+    final WALFactory wals = new WALFactory(CONF, null, method);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
@@ -758,18 +770,18 @@ public class TestHRegion {
           .getRegionInfo(), family, storeFiles, Lists.newArrayList(newFile), region
           .getRegionFileSystem().getStoreDir(Bytes.toString(family)));
 
-      HLogUtil.writeCompactionMarker(region.getLog(), this.region.getTableDesc(),
+      WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(),
           this.region.getRegionInfo(), compactionDescriptor, new AtomicLong(1));
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
 
       Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
       fs.create(recoveredEdits);
-      HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);
+      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
 
       long time = System.nanoTime();
 
-      writer.append(new HLog.Entry(new HLogKey(regionName, tableName, 10, time,
+      writer.append(new WAL.Entry(new HLogKey(regionName, tableName, 10, time,
           HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(),
           compactionDescriptor)));
       writer.close();
@@ -798,6 +810,7 @@ public class TestHRegion {
     } finally {
       HRegion.closeHRegion(this.region);
       this.region = null;
+      wals.close();
     }
   }
 
@@ -808,11 +821,13 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
-    HLog hlog = HLogFactory.createHLog(FILESYSTEM, logDir, "logs",
-      TEST_UTIL.getConfiguration());
+    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
+    FSUtils.setRootDir(walConf, logDir);
+    final WALFactory wals = new WALFactory(walConf, null, method);
+    final WAL wal = wals.getWAL(tableName.getName());
 
     this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
-      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family);
+      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
     try {
       Path regiondir = region.getRegionFileSystem().getRegionDir();
       FileSystem fs = region.getRegionFileSystem().getFileSystem();
@@ -836,59 +851,69 @@ public class TestHRegion {
       }
 
       // now verify that the flush markers are written
-      hlog.close();
-      HLog.Reader reader = HLogFactory.createReader(fs,
-        fs.listStatus(new Path(logDir, "logs"))[0].getPath(),
+      wal.shutdown();
+      WAL.Reader reader = wals.createReader(fs, DefaultWALProvider.getCurrentFileName(wal),
         TEST_UTIL.getConfiguration());
-
-      List<HLog.Entry> flushDescriptors = new ArrayList<HLog.Entry>();
-      long lastFlushSeqId = -1;
-      while (true) {
-        HLog.Entry entry = reader.next();
-        if (entry == null) {
-          break;
-        }
-        Cell cell = entry.getEdit().getCells().get(0);
-        if (WALEdit.isMetaEditFamily(cell)) {
-          FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
-          assertNotNull(flushDesc);
-          assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
-          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
-            assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
-          } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
-            assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
-          }
-          lastFlushSeqId = flushDesc.getFlushSequenceNumber();
-          assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
-          assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
-          StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
-          assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
-          assertEquals("family", storeFlushDesc.getStoreHomeDir());
-          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
-            assertEquals(0, storeFlushDesc.getFlushOutputCount());
-          } else {
-            assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
-            assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
+      try {
+        List<WAL.Entry> flushDescriptors = new ArrayList<WAL.Entry>();
+        long lastFlushSeqId = -1;
+        while (true) {
+          WAL.Entry entry = reader.next();
+          if (entry == null) {
+            break;
           }
+          Cell cell = entry.getEdit().getCells().get(0);
+          if (WALEdit.isMetaEditFamily(cell)) {
+            FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
+            assertNotNull(flushDesc);
+            assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
+            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
+              assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
+            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
+              assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
+            }
+            lastFlushSeqId = flushDesc.getFlushSequenceNumber();
+            assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
+            assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
+            StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
+            assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
+            assertEquals("family", storeFlushDesc.getStoreHomeDir());
+            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
+              assertEquals(0, storeFlushDesc.getFlushOutputCount());
+            } else {
+              assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
+              assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
+            }
 
-          flushDescriptors.add(entry);
+            flushDescriptors.add(entry);
+          }
         }
-      }
 
-      assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush
+        assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush
 
-      // now write those markers to the recovered edits again.
+        // now write those markers to the recovered edits again.
 
-      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
 
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
-      fs.create(recoveredEdits);
-      HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);
+        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
+        fs.create(recoveredEdits);
+        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
 
-      for (HLog.Entry entry : flushDescriptors) {
-        writer.append(entry);
+        for (WAL.Entry entry : flushDescriptors) {
+          writer.append(entry);
+        }
+        writer.close();
+      } finally {
+        if (null != reader) {
+          try {
+            reader.close();
+          } catch (IOException exception) {
+            LOG.warn("Problem closing wal: " + exception.getMessage());
+            LOG.debug("exception details", exception);
+          }
+        }
       }
-      writer.close();
+
 
       // close the region now, and reopen again
       region.close();
@@ -904,6 +929,7 @@ public class TestHRegion {
     } finally {
       HRegion.closeHRegion(this.region);
       this.region = null;
+      wals.close();
     }
   }
 
@@ -953,11 +979,13 @@ public class TestHRegion {
     // spy an actual WAL implementation to throw exception (was not able to mock)
     Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + "log");
 
-    HLog hlog = spy(HLogFactory.createHLog(FILESYSTEM, logDir, "logs",
-      TEST_UTIL.getConfiguration()));
+    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
+    FSUtils.setRootDir(walConf, logDir);
+    final WALFactory wals = new WALFactory(walConf, null, method);
+    WAL wal = spy(wals.getWAL(tableName.getName()));
 
     this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
-      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family);
+      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
     try {
       int i = 0;
       Put put = new Put(Bytes.toBytes(i));
@@ -969,7 +997,7 @@ public class TestHRegion {
       IsFlushWALMarker isFlushWALMarker = new IsFlushWALMarker(FlushAction.START_FLUSH);
 
       // throw exceptions if the WalEdit is a start flush action
-      when(hlog.appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any(),
+      when(wal.append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(),
         (WALEdit)argThat(isFlushWALMarker), (AtomicLong)any(), Mockito.anyBoolean(),
         (List<Cell>)any()))
           .thenThrow(new IOException("Fail to append flush marker"));
@@ -1000,7 +1028,7 @@ public class TestHRegion {
 
       region.close();
       this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
-        HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, hlog, family);
+        HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
       region.put(put);
 
       // 3. Test case where ABORT_FLUSH will throw exception.
@@ -4486,10 +4514,12 @@ public class TestHRegion {
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
     Path logDir = new Path(new Path(dir + method), "log");
-    HLog hlog = HLogFactory.createHLog(FILESYSTEM, logDir, UUID.randomUUID().toString(), conf);
-    final HLog log = spy(hlog);
+    final Configuration walConf = new Configuration(conf);
+    FSUtils.setRootDir(walConf, logDir);
+    final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
+    final WAL wal = spy(wals.getWAL(tableName.getName()));
     this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW,
-        HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, log,
+        HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, wal,
         new byte[][] { family });
 
     Put put = new Put(Bytes.toBytes("r1"));
@@ -4498,8 +4528,8 @@ public class TestHRegion {
     region.put(put);
 
     //verify append called or not
-    verify(log, expectAppend ? times(1) : never())
-      .appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any(),
+    verify(wal, expectAppend ? times(1) : never())
+      .append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(),
           (WALEdit)any(), (AtomicLong)any(), Mockito.anyBoolean(), (List<Cell>)any());
 
     // verify sync called or not
@@ -4509,9 +4539,9 @@ public class TestHRegion {
         public boolean evaluate() throws Exception {
           try {
             if (expectSync) {
-              verify(log, times(1)).sync(anyLong()); // Hregion calls this one
+              verify(wal, times(1)).sync(anyLong()); // Hregion calls this one
             } else if (expectSyncFromLogSyncer) {
-              verify(log, times(1)).sync(); // log syncer calls this one
+              verify(wal, times(1)).sync(); // wal syncer calls this one
             }
           } catch (Throwable ignore) {
           }
@@ -4519,8 +4549,8 @@ public class TestHRegion {
         }
       });
     } else {
-      //verify(log, never()).sync(anyLong());
-      verify(log, never()).sync();
+      //verify(wal, never()).sync(anyLong());
+      verify(wal, never()).sync();
     }
 
     HRegion.closeHRegion(this.region);
@@ -4532,7 +4562,7 @@ public class TestHRegion {
     // create a primary region, load some data and flush
     // create a secondary region, and do a get against that
     Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
+    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); 
 
     byte[][] families = new byte[][] {
         Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
@@ -4582,7 +4612,7 @@ public class TestHRegion {
     // create a primary region, load some data and flush
     // create a secondary region, and do a put against that
     Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
+    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
 
     byte[][] families = new byte[][] {
         Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
@@ -4635,7 +4665,7 @@ public class TestHRegion {
   @Test
   public void testCompactionFromPrimary() throws IOException {
     Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
+    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
 
     byte[][] families = new byte[][] {
         Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
@@ -4889,8 +4919,9 @@ public class TestHRegion {
    */
   private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
       String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
-      HLog hlog, byte[]... families) throws IOException {
-    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf, isReadOnly, durability, hlog, families);
+      WAL wal, byte[]... families) throws IOException {
+    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf,
+        isReadOnly, durability, wal, families);
   }
 
   /**
@@ -5503,7 +5534,7 @@ public class TestHRegion {
     HRegionInfo hri = new HRegionInfo(htd.getTableName(),
       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
 
-    // open the region w/o rss and log and flush some files
+    // open the region w/o rss and wal and flush some files
     HRegion region =
          HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
             .getConfiguration(), htd);
@@ -5516,15 +5547,15 @@ public class TestHRegion {
 
     ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
 
-    // capture appendNoSync() calls
-    HLog log = mock(HLog.class);
-    when(rss.getWAL((HRegionInfo) any())).thenReturn(log);
+    // capture append() calls
+    WAL wal = mock(WAL.class);
+    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
 
     try {
       region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
         TEST_UTIL.getConfiguration(), rss, null);
 
-      verify(log, times(1)).appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any()
+      verify(wal, times(1)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any()
         , editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List<Cell>)any());
 
       WALEdit edit = editCaptor.getValue();
@@ -5576,9 +5607,9 @@ public class TestHRegion {
 
     ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
 
-    // capture appendNoSync() calls
-    HLog log = mock(HLog.class);
-    when(rss.getWAL((HRegionInfo) any())).thenReturn(log);
+    // capture append() calls
+    WAL wal = mock(WAL.class);
+    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
 
     // open a region first so that it can be closed later
     region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
@@ -5588,7 +5619,7 @@ public class TestHRegion {
     region.close(false);
 
     // 2 times, one for region open, the other close region
-    verify(log, times(2)).appendNoSync((HTableDescriptor)any(), (HRegionInfo)any(), (HLogKey)any(),
+    verify(wal, times(2)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any(),
       editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List<Cell>)any());
 
     WALEdit edit = editCaptor.getAllValues().get(1);
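
Two deeper shifts run through TestHRegion beyond the setup changes: recovered-edits writers now come from the WALFactory instance instead of static HLogFactory methods, and entries are WAL.Entry objects keyed by HLogKey, which the mocked append() calls above treat as a WALKey. A condensed sketch of the recovered-edits write path, assuming wals, fs, recoveredEdits, regionName, tableName, row, family, i, and time in scope as in the tests above:

    WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put,
        Bytes.toBytes(i)));
    writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
        HConstants.DEFAULT_CLUSTER_ID), edit));
    writer.close();

Note also that appendNoSync() disappears from the interface as mocked here; the tests now stub and verify append(HTableDescriptor, HRegionInfo, WALKey, WALEdit, AtomicLong, boolean, List<Cell>) instead.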

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 0469f0e..03acfdc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -110,9 +110,9 @@ public class TestMajorCompaction {
 
   @After
   public void tearDown() throws Exception {
-    HLog hlog = r.getLog();
+    WAL wal = r.getWAL();
     this.r.close();
-    hlog.closeAndDelete();
+    wal.close();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
index 49d0445..7ac6eef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -90,9 +90,9 @@ public class TestMinorCompaction {
 
   @After
   public void tearDown() throws Exception {
-    HLog hlog = r.getLog();
+    WAL wal = r.getWAL();
     this.r.close();
-    hlog.closeAndDelete();
+    wal.close();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index 9e65751..8b5b4a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -43,11 +44,11 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
@@ -69,7 +70,7 @@ public class TestRegionMergeTransaction {
   private HRegion region_a;
   private HRegion region_b;
   private HRegion region_c;
-  private HLog wal;
+  private WALFactory wals;
   private FileSystem fs;
   // Start rows of region_a,region_b,region_c
   private static final byte[] STARTROW_A = new byte[] { 'a', 'a', 'a' };
@@ -82,11 +83,12 @@ public class TestRegionMergeTransaction {
   public void setup() throws IOException {
     this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
     this.fs.delete(this.testdir, true);
-    this.wal = HLogFactory.createHLog(fs, this.testdir, "logs",
-        TEST_UTIL.getConfiguration());
-    this.region_a = createRegion(this.testdir, this.wal, STARTROW_A, STARTROW_B);
-    this.region_b = createRegion(this.testdir, this.wal, STARTROW_B, STARTROW_C);
-    this.region_c = createRegion(this.testdir, this.wal, STARTROW_C, ENDROW);
+    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
+    FSUtils.setRootDir(walConf, this.testdir);
+    this.wals = new WALFactory(walConf, null, TestRegionMergeTransaction.class.getName());
+    this.region_a = createRegion(this.testdir, this.wals, STARTROW_A, STARTROW_B);
+    this.region_b = createRegion(this.testdir, this.wals, STARTROW_B, STARTROW_C);
+    this.region_c = createRegion(this.testdir, this.wals, STARTROW_C, ENDROW);
     assert region_a != null && region_b != null && region_c != null;
     TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
   }
@@ -101,8 +103,9 @@ public class TestRegionMergeTransaction {
             + region.getRegionFileSystem().getRegionDir());
       }
     }
-    if (this.wal != null)
-      this.wal.closeAndDelete();
+    if (this.wals != null) {
+      this.wals.close();
+    }
     this.fs.delete(this.testdir, true);
   }
 
@@ -401,7 +404,7 @@ public class TestRegionMergeTransaction {
   private class MockedFailedMergedRegionOpen extends IOException {
   }
 
-  private HRegion createRegion(final Path testdir, final HLog wal,
+  private HRegion createRegion(final Path testdir, final WALFactory wals,
       final byte[] startrow, final byte[] endrow)
       throws IOException {
     // Make a region with start and end keys.
@@ -412,7 +415,7 @@ public class TestRegionMergeTransaction {
     HRegion a = HRegion.createHRegion(hri, testdir,
         TEST_UTIL.getConfiguration(), htd);
     HRegion.closeHRegion(a);
-    return HRegion.openHRegion(testdir, hri, htd, wal,
+    return HRegion.openHRegion(testdir, hri, htd, wals.getWAL(hri.getEncodedNameAsBytes()),
         TEST_UTIL.getConfiguration());
   }
 
@@ -457,7 +460,7 @@ public class TestRegionMergeTransaction {
           }
           Put put = new Put(k);
           put.add(f, null, k);
-          if (r.getLog() == null)
+          if (r.getWAL() == null)
             put.setDurability(Durability.SKIP_WAL);
           r.put(put);
           rowCount++;
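
As in the helper above, methods that used to accept an HLog now take the WALFactory and resolve a per-region WAL at open time. Condensed from createRegion() (hri, htd, testdir, and wals assumed in scope as in the test):

    HRegion a = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
    HRegion.closeHRegion(a);
    return HRegion.openHRegion(testdir, hri, htd,
        wals.getWAL(hri.getEncodedNameAsBytes()), TEST_UTIL.getConfiguration());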

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 4479ce6..ca97c3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -45,11 +46,11 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
@@ -70,7 +71,7 @@ public class TestSplitTransaction {
   private final Path testdir =
     TEST_UTIL.getDataTestDir(this.getClass().getName());
   private HRegion parent;
-  private HLog wal;
+  private WALFactory wals;
   private FileSystem fs;
   private static final byte [] STARTROW = new byte [] {'a', 'a', 'a'};
   // '{' is next ascii after 'z'.
@@ -85,10 +86,11 @@ public class TestSplitTransaction {
     this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
     TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
     this.fs.delete(this.testdir, true);
-    this.wal = HLogFactory.createHLog(fs, this.testdir, "logs",
-      TEST_UTIL.getConfiguration());
+    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
+    FSUtils.setRootDir(walConf, this.testdir);
+    this.wals = new WALFactory(walConf, null, this.getClass().getName());
     
-    this.parent = createRegion(this.testdir, this.wal);
+    this.parent = createRegion(this.testdir, this.wals);
     RegionCoprocessorHost host = new RegionCoprocessorHost(this.parent, null, TEST_UTIL.getConfiguration());
     this.parent.setCoprocessorHost(host);
     TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
@@ -100,7 +102,9 @@ public class TestSplitTransaction {
     if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
       throw new IOException("Failed delete of " + regionDir);
     }
-    if (this.wal != null) this.wal.closeAndDelete();
+    if (this.wals != null) {
+      this.wals.close();
+    }
     this.fs.delete(this.testdir, true);
   }
 
@@ -327,7 +331,7 @@ public class TestSplitTransaction {
     return rowcount;
   }
 
-  HRegion createRegion(final Path testdir, final HLog wal)
+  HRegion createRegion(final Path testdir, final WALFactory wals)
   throws IOException {
     // Make a region with start and end keys. Use 'aaa', to 'AAA'.  The load
     // region utility will add rows between 'aaa' and 'zzz'.
@@ -337,7 +341,7 @@ public class TestSplitTransaction {
     HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
     HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
     HRegion.closeHRegion(r);
-    return HRegion.openHRegion(testdir, hri, htd, wal,
+    return HRegion.openHRegion(testdir, hri, htd, wals.getWAL(hri.getEncodedNameAsBytes()),
       TEST_UTIL.getConfiguration());
   }
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index be99f37..b5bc927 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -70,8 +70,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -168,8 +168,7 @@ public class TestStore {
     //Setting up a Store
     Path basedir = new Path(DIR+methodName);
     Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
-    String logName = "logs";
-    Path logdir = new Path(basedir, logName);
+    final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(methodName));
 
     FileSystem fs = FileSystem.get(conf);
 
@@ -181,8 +180,11 @@ public class TestStore {
       htd.addFamily(hcd);
     }
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
-    HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+    final Configuration walConf = new Configuration(conf);
+    FSUtils.setRootDir(walConf, basedir);
+    final WALFactory wals = new WALFactory(walConf, null, methodName);
+    HRegion region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf,
+        info, htd, null);
 
     store = new HStore(region, hcd, conf);
     return store;
@@ -783,7 +785,7 @@ public class TestStore {
         LOG.info("After failed flush, we should still have no files!");
         files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
         Assert.assertEquals(0, files != null ? files.size() : 0);
-        store.getHRegion().getLog().closeAndDelete();
+        store.getHRegion().getWAL().close();
         return null;
       }
     });

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
index e66a069..0319051 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.StoppableImplementation;
@@ -62,10 +62,10 @@ public class TestStoreFileRefresherChore {
   private Path testDir;
 
   @Before
-  public void setUp() {
+  public void setUp() throws IOException {
     TEST_UTIL = new HBaseTestingUtility();
     testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, testDir.toString());
+    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
   }
 
   private HTableDescriptor getTableDesc(TableName tableName, byte[]... families) {
@@ -101,8 +101,10 @@ public class TestStoreFileRefresherChore {
     HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);
 
     HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
-    HRegion region = new HRegion(fs, HLogFactory.createHLog(fs.getFileSystem(),
-      tableDir, "log_" + replicaId, conf), conf, htd, null);
+    final Configuration walConf = new Configuration(conf);
+    FSUtils.setRootDir(walConf, tableDir);
+    final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
+    HRegion region = new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes()), conf, htd, null);
 
     region.initialize();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyHLog.java
deleted file mode 100644
index 10ba82f..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyHLog.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-
-/*
- * This is a utility class, used by tests, which fails operation specified by FailureType enum
- */
-public class FaultyHLog extends FSHLog {
-  public enum FailureType {
-    NONE, APPENDNOSYNC, SYNC
-  }
-  FailureType ft = FailureType.NONE;
-
-  public FaultyHLog(FileSystem fs, Path rootDir, String logName, Configuration conf)
-      throws IOException {
-    super(fs, rootDir, logName, conf);
-  }
-  
-  public void setFailureType(FailureType fType) {
-    this.ft = fType;
-  }
-  
-  @Override
-  public void sync(long txid) throws IOException {
-    if (this.ft == FailureType.SYNC) {
-      throw new IOException("sync");
-    }
-    super.sync(txid);
-  }
-  @Override
-  public long appendNoSync(HRegionInfo info, TableName tableName, WALEdit edits,
-      List<UUID> clusterIds, final long now, HTableDescriptor htd, AtomicLong sequenceId,
-      boolean isInMemstore, long nonceGroup, long nonce) throws IOException {
-    if (this.ft == FailureType.APPENDNOSYNC) {
-      throw new IOException("appendNoSync");
-    }
-    return super.appendNoSync(info, tableName, edits, clusterIds, now, htd, sequenceId,
-      isInMemstore, nonceGroup, nonce);
-  }
-}
-

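For reference, the deleted helper was driven from tests roughly as below; everything except the local variable names comes straight from the removed class:

    // Sketch: inject a sync failure, then restore normal behavior.
    FaultyHLog wal = new FaultyHLog(fs, rootDir, "faultylog", conf);
    wal.setFailureType(FaultyHLog.FailureType.SYNC);
    try {
      wal.sync(txid);  // FailureType.SYNC makes this throw new IOException("sync")
    } catch (IOException expected) {
      // expected; clear the fault so later operations succeed
      wal.setFailureType(FaultyHLog.FailureType.NONE);
    }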
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
index 2164a43..a0e4490 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
@@ -23,12 +23,12 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.Queue;
 
-import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 public class FaultySequenceFileLogReader extends SequenceFileLogReader {
 
-  enum FailureType {
+  // public until class relocates to o.a.h.h.wal
+  public enum FailureType {
     BEGINNING, MIDDLE, END, NONE
   }
 
@@ -40,17 +40,17 @@ public class FaultySequenceFileLogReader extends SequenceFileLogReader {
   }
 
   @Override
-  public HLog.Entry next(HLog.Entry reuse) throws IOException {
+  public Entry next(Entry reuse) throws IOException {
     this.entryStart = this.getPosition();
     boolean b = true;
 
     if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
       while (b == true) {
-        HLog.Entry e = new HLog.Entry(new HLogKey(), new WALEdit());
+        Entry e = new Entry(new HLogKey(), new WALEdit());
         if (compressionContext != null) {
           e.setCompressionContext(compressionContext);
         }
-        b = this.reader.next(e.getKey(), e.getEdit());
+        b = readNext(e);
         nextQueue.offer(e);
         numberOfFileEntries++;
       }

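Tests swap this reader in through the reader-impl configuration key, the same key the deleted performance tool below sets for its secure reader. Pre-refactor the registration looked like the sketch here; after this commit the interface token moves out of HLog, so treat the third argument as indicative only:

    // Route WAL reads through the fault-injecting reader (pre-refactor form).
    conf.setClass("hbase.regionserver.hlog.reader.impl",
        FaultySequenceFileLogReader.class, HLog.Reader.class);
    // The now-public FailureType then picks where in the file the simulated
    // failure occurs: BEGINNING, MIDDLE, END, or NONE.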
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
deleted file mode 100644
index f56ef98..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
+++ /dev/null
@@ -1,566 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
-import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.htrace.Sampler;
-import org.htrace.Trace;
-import org.htrace.TraceScope;
-import org.htrace.impl.ProbabilitySampler;
-
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.Meter;
-import com.yammer.metrics.core.MetricsRegistry;
-import com.yammer.metrics.reporting.ConsoleReporter;
-
-/**
- * This class runs performance benchmarks for {@link HLog}.
- * See usage for this tool by running:
- * <code>$ hbase org.apache.hadoop.hbase.regionserver.wal.HLogPerformanceEvaluation -h</code>
- */
-@InterfaceAudience.Private
-public final class HLogPerformanceEvaluation extends Configured implements Tool {
-  static final Log LOG = LogFactory.getLog(HLogPerformanceEvaluation.class.getName());
-  private final MetricsRegistry metrics = new MetricsRegistry();
-  private final Meter syncMeter =
-    metrics.newMeter(HLogPerformanceEvaluation.class, "syncMeter", "syncs", TimeUnit.MILLISECONDS);
-  private final Histogram syncHistogram =
-    metrics.newHistogram(HLogPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs",
-      true);
-  private final Histogram syncCountHistogram =
-      metrics.newHistogram(HLogPerformanceEvaluation.class, "syncCountHistogram", "countPerSync",
-        true);
-  private final Meter appendMeter =
-    metrics.newMeter(HLogPerformanceEvaluation.class, "appendMeter", "bytes",
-      TimeUnit.MILLISECONDS);
-  private final Histogram latencyHistogram =
-    metrics.newHistogram(HLogPerformanceEvaluation.class, "latencyHistogram", "nanos", true);
-
-  private HBaseTestingUtility TEST_UTIL;
-
-  static final String TABLE_NAME = "HLogPerformanceEvaluation";
-  static final String QUALIFIER_PREFIX = "q";
-  static final String FAMILY_PREFIX = "cf";
-
-  private int numQualifiers = 1;
-  private int valueSize = 512;
-  private int keySize = 16;
-
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    TEST_UTIL = new HBaseTestingUtility(conf);
-  }
-
-  /**
-   * Performs HLog.append() of a Put object for the number of iterations requested.
-   * Keys and values are generated randomly; the number of column families,
-   * qualifiers, and key/value sizes are tunable by the user.
-   */
-  class HLogPutBenchmark implements Runnable {
-    private final long numIterations;
-    private final int numFamilies;
-    private final boolean noSync;
-    private final HRegion region;
-    private final int syncInterval;
-    private final HTableDescriptor htd;
-    private final Sampler loopSampler;
-
-    HLogPutBenchmark(final HRegion region, final HTableDescriptor htd,
-        final long numIterations, final boolean noSync, final int syncInterval,
-        final double traceFreq) {
-      this.numIterations = numIterations;
-      this.noSync = noSync;
-      this.syncInterval = syncInterval;
-      this.numFamilies = htd.getColumnFamilies().length;
-      this.region = region;
-      this.htd = htd;
-      String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
-      if (spanReceivers == null || spanReceivers.isEmpty()) {
-        loopSampler = Sampler.NEVER;
-      } else {
-        if (traceFreq <= 0.0) {
-          LOG.warn("Tracing enabled but traceFreq=0.");
-          loopSampler = Sampler.NEVER;
-        } else if (traceFreq >= 1.0) {
-          loopSampler = Sampler.ALWAYS;
-          if (numIterations > 1000) {
-            LOG.warn("Full tracing of all iterations will produce a lot of data. Be sure your"
-              + " SpanReciever can keep up.");
-          }
-        } else {
-          loopSampler = new ProbabilitySampler(traceFreq);
-        }
-      }
-    }
-
-    @Override
-    public void run() {
-      byte[] key = new byte[keySize];
-      byte[] value = new byte[valueSize];
-      Random rand = new Random(Thread.currentThread().getId());
-      HLog hlog = region.getLog();
-      ArrayList<UUID> clusters = new ArrayList<UUID>();
-      long nonce = HConstants.NO_NONCE;
-
-      TraceScope threadScope =
-        Trace.startSpan("HLogPerfEval." + Thread.currentThread().getName());
-      try {
-        long startTime = System.currentTimeMillis();
-        int lastSync = 0;
-        for (int i = 0; i < numIterations; ++i) {
-          assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
-          TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
-          try {
-            long now = System.nanoTime();
-            Put put = setupPut(rand, key, value, numFamilies);
-            WALEdit walEdit = new WALEdit();
-            addFamilyMapToWALEdit(put.getFamilyCellMap(), walEdit);
-            HRegionInfo hri = region.getRegionInfo();
-            hlog.appendNoSync(hri, hri.getTable(), walEdit, clusters, now, htd,
-              region.getSequenceId(), true, nonce, nonce);
-            if (!this.noSync) {
-              if (++lastSync >= this.syncInterval) {
-                hlog.sync();
-                lastSync = 0;
-              }
-            }
-            latencyHistogram.update(System.nanoTime() - now);
-          } finally {
-            loopScope.close();
-          }
-        }
-        long totalTime = (System.currentTimeMillis() - startTime);
-        logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
-      } catch (Exception e) {
-        LOG.error(getClass().getSimpleName() + " Thread failed", e);
-      } finally {
-        threadScope.close();
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Path rootRegionDir = null;
-    int numThreads = 1;
-    long numIterations = 1000000;
-    int numFamilies = 1;
-    int syncInterval = 0;
-    boolean noSync = false;
-    boolean verify = false;
-    boolean verbose = false;
-    boolean cleanup = true;
-    boolean noclosefs = false;
-    long roll = Long.MAX_VALUE;
-    boolean compress = false;
-    String cipher = null;
-    String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
-    boolean trace = spanReceivers != null && !spanReceivers.isEmpty();
-    double traceFreq = 1.0;
-    // Process command line args
-    for (int i = 0; i < args.length; i++) {
-      String cmd = args[i];
-      try {
-        if (cmd.equals("-threads")) {
-          numThreads = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-iterations")) {
-          numIterations = Long.parseLong(args[++i]);
-        } else if (cmd.equals("-path")) {
-          rootRegionDir = new Path(args[++i]);
-        } else if (cmd.equals("-families")) {
-          numFamilies = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-qualifiers")) {
-          numQualifiers = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-keySize")) {
-          keySize = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-valueSize")) {
-          valueSize = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-syncInterval")) {
-          syncInterval = Integer.parseInt(args[++i]);
-        } else if (cmd.equals("-nosync")) {
-          noSync = true;
-        } else if (cmd.equals("-verify")) {
-          verify = true;
-        } else if (cmd.equals("-verbose")) {
-          verbose = true;
-        } else if (cmd.equals("-nocleanup")) {
-          cleanup = false;
-        } else if (cmd.equals("-noclosefs")) {
-          noclosefs = true;
-        } else if (cmd.equals("-roll")) {
-          roll = Long.parseLong(args[++i]);
-        } else if (cmd.equals("-compress")) {
-          compress = true;
-        } else if (cmd.equals("-encryption")) {
-          cipher = args[++i];
-        } else if (cmd.equals("-traceFreq")) {
-          traceFreq = Double.parseDouble(args[++i]);
-        } else if (cmd.equals("-h")) {
-          printUsageAndExit();
-        } else if (cmd.equals("--help")) {
-          printUsageAndExit();
-        } else {
-          System.err.println("UNEXPECTED: " + cmd);
-          printUsageAndExit();
-        }
-      } catch (Exception e) {
-        printUsageAndExit();
-      }
-    }
-
-    if (compress) {
-      Configuration conf = getConf();
-      conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    }
-
-    if (cipher != null) {
-      // Set up HLog for encryption
-      Configuration conf = getConf();
-      conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
-      conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
-      conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
-        HLog.Reader.class);
-      conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
-        HLog.Writer.class);
-      conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
-      conf.set(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, cipher);
-    }
-
-    // Internal config keyed off the number of threads; if there are more threads
-    // than handlers, things break. In the regionserver, handlers == threads.
-    getConf().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, numThreads);
-
-    // Run HLog Performance Evaluation
-    // First set the fs from configs.  In case we are on hadoop1
-    FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
-    FileSystem fs = FileSystem.get(getConf());
-    LOG.info("FileSystem: " + fs);
-
-    SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
-    TraceScope scope = Trace.startSpan("HLogPerfEval", trace ? Sampler.ALWAYS : Sampler.NEVER);
-
-    try {
-      if (rootRegionDir == null) {
-        rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("HLogPerformanceEvaluation");
-      }
-      rootRegionDir = rootRegionDir.makeQualified(fs);
-      cleanRegionRootDir(fs, rootRegionDir);
-      // Initialize Table Descriptor
-      HTableDescriptor htd = createHTableDescriptor(numFamilies);
-      final long whenToRoll = roll;
-      final HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {
-
-        @Override
-        public void postSync(final long timeInNanos, final int handlerSyncs) {
-          super.postSync(timeInNanos, handlerSyncs);
-          syncMeter.mark();
-          syncHistogram.update(timeInNanos);
-          syncCountHistogram.update(handlerSyncs);
-        }
-
-        @Override
-        public long postAppend(final HLog.Entry entry, final long elapsedTime) {
-          long size = super.postAppend(entry, elapsedTime);
-          appendMeter.mark(size);
-          return size;
-        }
-      };
-      hlog.registerWALActionsListener(new WALActionsListener() {
-        private int appends = 0;
-
-        @Override
-        public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
-            WALEdit logEdit) {
-          this.appends++;
-          if (this.appends % whenToRoll == 0) {
-            LOG.info("Rolling after " + appends + " edits");
-            // We used to make an explicit call to rollWriter but changed it to a request
-            // to avoid deadlock (there are fewer threads going on in this class than
-            // in the regionserver -- the regionserver does not have the issue).
-            ((FSHLog)hlog).requestLogRoll();
-          }
-        }
-
-        @Override
-        public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) {
-        }
-
-        @Override
-        public void preLogRoll(Path oldPath, Path newPath) throws IOException {
-        }
-
-        @Override
-        public void preLogArchive(Path oldPath, Path newPath) throws IOException {
-        }
-
-        @Override
-        public void postLogRoll(Path oldPath, Path newPath) throws IOException {
-        }
-
-        @Override
-        public void postLogArchive(Path oldPath, Path newPath) throws IOException {
-        }
-
-        @Override
-        public void logRollRequested() {
-        }
-
-        @Override
-        public void logCloseRequested() {
-        }
-      });
-      hlog.rollWriter();
-      HRegion region = null;
-
-      try {
-        region = openRegion(fs, rootRegionDir, htd, hlog);
-        ConsoleReporter.enable(this.metrics, 30, TimeUnit.SECONDS);
-        long putTime =
-          runBenchmark(Trace.wrap(
-              new HLogPutBenchmark(region, htd, numIterations, noSync, syncInterval, traceFreq)),
-            numThreads);
-        logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations +
-          ", syncInterval=" + syncInterval, numIterations * numThreads, putTime);
-        
-        if (region != null) {
-          closeRegion(region);
-          region = null;
-        }
-        if (verify) {
-          Path dir = ((FSHLog) hlog).getDir();
-          long editCount = 0;
-          FileStatus [] fsss = fs.listStatus(dir);
-          if (fsss.length == 0) throw new IllegalStateException("No WAL found");
-          for (FileStatus fss: fsss) {
-            Path p = fss.getPath();
-            if (!fs.exists(p)) throw new IllegalStateException(p.toString());
-            editCount += verify(p, verbose);
-          }
-          long expected = numIterations * numThreads;
-          if (editCount != expected) {
-            throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected);
-          }
-        }
-      } finally {
-        if (region != null) closeRegion(region);
-        // Remove the root dir for this test region
-        if (cleanup) cleanRegionRootDir(fs, rootRegionDir);
-      }
-    } finally {
-      // We may be called inside a test that wants to keep on using the fs.
-      if (!noclosefs) fs.close();
-      scope.close();
-      if (receiverHost != null) receiverHost.closeReceivers();
-    }
-
-    return(0);
-  }
-
-  private static HTableDescriptor createHTableDescriptor(final int numFamilies) {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
-    for (int i = 0; i < numFamilies; ++i) {
-      HColumnDescriptor colDef = new HColumnDescriptor(FAMILY_PREFIX + i);
-      htd.addFamily(colDef);
-    }
-    return htd;
-  }
-
-  /**
-   * Verify the content of the WAL file.
-   * Verify that the file has the expected number of edits.
-   * @param wal the WAL file to read
-   * @return Count of edits.
-   * @throws IOException
-   */
-  private long verify(final Path wal, final boolean verbose) throws IOException {
-    HLog.Reader reader = HLogFactory.createReader(wal.getFileSystem(getConf()), wal, getConf());
-    long count = 0;
-    Map<String, Long> sequenceIds = new HashMap<String, Long>();
-    try {
-      while (true) {
-        Entry e = reader.next();
-        if (e == null) {
-          LOG.debug("Read count=" + count + " from " + wal);
-          break;
-        }
-        count++;
-        long seqid = e.getKey().getLogSeqNum();
-        if (sequenceIds.containsKey(Bytes.toString(e.getKey().getEncodedRegionName()))) {
-          // sequenceIds should be increasing for every region
-          if (sequenceIds.get(Bytes.toString(e.getKey().getEncodedRegionName())) >= seqid) {
-            throw new IllegalStateException("wal = " + wal.getName() + ", " + "previous seqid = "
-                + sequenceIds.get(Bytes.toString(e.getKey().getEncodedRegionName()))
-                + ", current seqid = " + seqid);
-          }
-        }
-        // update the sequence Id.
-        sequenceIds.put(Bytes.toString(e.getKey().getEncodedRegionName()), seqid);
-        if (verbose) LOG.info("seqid=" + seqid);
-      }
-    } finally {
-      reader.close();
-    }
-    return count;
-  }
-
-  private static void logBenchmarkResult(String testName, long numTests, long totalTime) {
-    float tsec = totalTime / 1000.0f;
-    LOG.info(String.format("%s took %.3fs %.3fops/s", testName, tsec, numTests / tsec));
-    
-  }
-
-  private void printUsageAndExit() {
-    System.err.printf("Usage: bin/hbase %s [options]\n", getClass().getName());
-    System.err.println(" where [options] are:");
-    System.err.println("  -h|-help         Show this help and exit.");
-    System.err.println("  -threads <N>     Number of threads writing on the WAL.");
-    System.err.println("  -iterations <N>  Number of iterations per thread.");
-    System.err.println("  -path <PATH>     Path where region's root directory is created.");
-    System.err.println("  -families <N>    Number of column families to write.");
-    System.err.println("  -qualifiers <N>  Number of qualifiers to write.");
-    System.err.println("  -keySize <N>     Row key size in byte.");
-    System.err.println("  -valueSize <N>   Row/Col value size in byte.");
-    System.err.println("  -nocleanup       Do NOT remove test data when done.");
-    System.err.println("  -noclosefs       Do NOT close the filesystem when done.");
-    System.err.println("  -nosync          Append without syncing");
-    System.err.println("  -syncInterval <N> Append N edits and then sync. " +
-      "Default=0, i.e. sync every edit.");
-    System.err.println("  -verify          Verify edits written in sequence");
-    System.err.println("  -verbose         Output extra info; " +
-      "e.g. all edit seq ids when verifying");
-    System.err.println("  -roll <N>        Roll the way every N appends");
-    System.err.println("  -encryption <A>  Encrypt the WAL with algorithm A, e.g. AES");
-    System.err.println("  -traceFreq <N>   Rate of trace sampling. Default: 1.0, " +
-      "only respected when tracing is enabled, ie -Dhbase.trace.spanreceiver.classes=...");
-    System.err.println("");
-    System.err.println("Examples:");
-    System.err.println("");
-    System.err.println(" To run 100 threads on hdfs with log rolling every 10k edits and " +
-      "verification afterward do:");
-    System.err.println(" $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal." +
-      "HLogPerformanceEvaluation \\");
-    System.err.println("    -conf ./core-site.xml -path hdfs://example.org:7000/tmp " +
-      "-threads 100 -roll 10000 -verify");
-    System.exit(1);
-  }
-
-  private HRegion openRegion(final FileSystem fs, final Path dir, final HTableDescriptor htd,
-      final HLog hlog)
-  throws IOException {
-    // Initialize HRegion
-    HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
-    return HRegion.createHRegion(regionInfo, dir, getConf(), htd, hlog);
-  }
-
-  private void closeRegion(final HRegion region) throws IOException {
-    if (region != null) {
-      region.close();
-      HLog wal = region.getLog();
-      if (wal != null) wal.close();
-    }
-  }
-
-  private void cleanRegionRootDir(final FileSystem fs, final Path dir) throws IOException {
-    if (fs.exists(dir)) {
-      fs.delete(dir, true);
-    }
-  }
-
-  private Put setupPut(Random rand, byte[] key, byte[] value, final int numFamilies) {
-    rand.nextBytes(key);
-    Put put = new Put(key);
-    for (int cf = 0; cf < numFamilies; ++cf) {
-      for (int q = 0; q < numQualifiers; ++q) {
-        rand.nextBytes(value);
-        put.add(Bytes.toBytes(FAMILY_PREFIX + cf), Bytes.toBytes(QUALIFIER_PREFIX + q), value);
-      }
-    }
-    return put;
-  }
-
-  private void addFamilyMapToWALEdit(Map<byte[], List<Cell>> familyMap,
-      WALEdit walEdit) {
-    for (List<Cell> edits : familyMap.values()) {
-      for (Cell cell : edits) {
-        walEdit.add(cell);
-      }
-    }
-  }
-
-  private long runBenchmark(Runnable runnable, final int numThreads) throws InterruptedException {
-    Thread[] threads = new Thread[numThreads];
-    long startTime = System.currentTimeMillis();
-    for (int i = 0; i < numThreads; ++i) {
-      threads[i] = new Thread(runnable, "t" + i);
-      threads[i].start();
-    }
-    for (Thread t : threads) t.join();
-    long endTime = System.currentTimeMillis();
-    return(endTime - startTime);
-  }
-
-  /**
-   * The guts of the {@link #main} method.
-   * Call this method to avoid the {@link #main(String[])} System.exit.
-   * @param args
-   * @return errCode
-   * @throws Exception
-   */
-  static int innerMain(final Configuration c, final String [] args) throws Exception {
-    return ToolRunner.run(c, new HLogPerformanceEvaluation(), args);
-  }
-
-  public static void main(String[] args) throws Exception {
-     System.exit(innerMain(HBaseConfiguration.create(), args));
-  }
-}

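The benchmark itself survives the refactor; its WAL-based successor (assumed here to be org.apache.hadoop.hbase.wal.WALPerformanceEvaluation, following this commit's package move) keeps the same Tool contract, so it can be driven the same way the deleted innerMain() drove this class:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    // Same ToolRunner pattern as the deleted innerMain() above; the successor
    // class name is an assumption based on the refactor's new wal package.
    int errCode = ToolRunner.run(HBaseConfiguration.create(),
        new org.apache.hadoop.hbase.wal.WALPerformanceEvaluation(),
        new String[] {"-threads", "100", "-roll", "10000", "-verify"});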
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java
deleted file mode 100644
index f2fcf80..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.wal;
-
-
-/**
- * A test utility that exposes the number of rolled log files
- * so they can be counted from outside this package.
- *
- * This class makes available methods that are otherwise package-protected.
- * It is intended for tests only.
- */
-public class HLogUtilsForTests {
-  
-  /**
-   * Returns the count of rolled log files.
-   * @param log the write-ahead log to inspect
-   * @return the number of rolled log files
-   */
-  public static int getNumRolledLogFiles(HLog log) {
-    return ((FSHLog) log).getNumRolledLogFiles();
-  }
-
-  public static int getNumEntries(HLog log) {
-    return ((FSHLog) log).getNumEntries();
-  }
-}

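If a test outside the package still needs these counters after the rename, the equivalent helper is a mechanical translation, swapping HLog for the new WAL type while keeping the FSHLog cast (a hypothetical sketch; it assumes FSHLog retains both package-visible counters):

    package org.apache.hadoop.hbase.regionserver.wal;

    import org.apache.hadoop.hbase.wal.WAL;

    // Hypothetical WAL-era replacement for the deleted HLogUtilsForTests.
    public class WALUtilsForTests {
      public static int getNumRolledLogFiles(WAL log) {
        return ((FSHLog) log).getNumRolledLogFiles();
      }

      public static int getNumEntries(WAL log) {
        return ((FSHLog) log).getNumEntries();
      }
    }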
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java
deleted file mode 100644
index d240e66..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-public class InstrumentedSequenceFileLogWriter extends ProtobufLogWriter {
-
-  public InstrumentedSequenceFileLogWriter() {
-    super();
-  }
-
-  public static boolean activateFailure = false;
-  @Override
-    public void append(HLog.Entry entry) throws IOException {
-      super.append(entry);
-      if (activateFailure && Bytes.equals(entry.getKey().getEncodedRegionName(), "break".getBytes())) {
-        System.out.println(getClass().getName() + ": I will throw an exception now...");
-        throw(new IOException("This exception is instrumented and should only be thrown for testing"));
-      }
-    }
-}

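The same fault-injection trick translates directly to the refactored writer interface, whose append() now takes a WAL.Entry (see the SequenceFileLogWriter hunk below). A hypothetical equivalent:

    package org.apache.hadoop.hbase.regionserver.wal;

    import java.io.IOException;

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.wal.WAL;

    // Hypothetical WAL-era counterpart of the deleted instrumented writer.
    public class InstrumentedWALWriter extends ProtobufLogWriter {

      public static boolean activateFailure = false;

      @Override
      public void append(WAL.Entry entry) throws IOException {
        super.append(entry);
        if (activateFailure
            && Bytes.equals(entry.getKey().getEncodedRegionName(), Bytes.toBytes("break"))) {
          throw new IOException("This exception is instrumented and should only be thrown for testing");
        }
      }
    }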
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
index 221f76e..7c13c00 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
@@ -32,8 +32,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Metadata;
@@ -42,8 +43,13 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.DefaultCodec;
 
 /**
- * Implementation of {@link HLog.Writer} that delegates to
+ * Implementation of {@link WALProvider.Writer} that delegates to
  * SequenceFile.Writer. Legacy implementation only used for compat tests.
+ *
+ * Note that because this class writes to the legacy Hadoop-specific SequenceFile
+ * format, users of it must write {@link HLogKey} keys and not arbitrary
+ * {@link WALKey}s because the latter are not Writables (nor made to work with
+ * Hadoop serialization).
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SequenceFileLogWriter extends WriterBase {
@@ -163,7 +169,7 @@ public class SequenceFileLogWriter extends WriterBase {
   }
 
   @Override
-  public void append(HLog.Entry entry) throws IOException {
+  public void append(WAL.Entry entry) throws IOException {
     entry.setCompressionContext(compressionContext);
     try {
       this.writer.append(entry.getKey(), entry.getEdit());
@@ -213,11 +219,4 @@ public class SequenceFileLogWriter extends WriterBase {
   public FSDataOutputStream getWriterFSDataOutputStream() {
     return this.writer_out;
   }
-
-  /**
-   * This method is empty as trailer is added only in Protobuf based hlog readers/writers.
-   */
-  @Override
-  public void setWALTrailer(WALTrailer walTrailer) {
-  }
 }

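Concretely, the new class comment above means a compat test drives this writer with HLogKey-keyed entries, along these lines (a sketch: the HLogKey constructor arguments and the writer setup are assumptions to check against the actual overloads):

    // The legacy SequenceFile format serializes Writable keys only, so the
    // entry is keyed by an HLogKey rather than a plain WALKey.
    WAL.Entry entry = new WAL.Entry(
        new HLogKey(regionInfo.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis()),
        edit);
    legacyWriter.append(entry);  // SequenceFileLogWriter from this file
    legacyWriter.sync();         // flush through the underlying SequenceFile.Writer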
