hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1393126 [4/4] - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/backup/example/ main/java/org/apache/hadoop/hbase/fs/ main/java/org/apache/hadoop/hbase/mapreduce/ main/java/org/apache/hadoop/hbase/master/ main/java/org/...
Date Tue, 02 Oct 2012 19:29:21 GMT
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
(original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
Tue Oct  2 19:29:19 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.Coprocessor;
@@ -158,7 +159,8 @@ public class TestHLog  {
     final byte [] tableName = Bytes.toBytes(getName());
     final byte [] rowName = tableName;
     Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
-    HLog log = new HLog(fs, logdir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, 
+        HConstants.HREGION_LOGDIR_NAME, conf);
     final int howmany = 3;
     HRegionInfo[] infos = new HRegionInfo[3];
     Path tabledir = new Path(hbaseDir, getName());
@@ -235,8 +237,9 @@ public class TestHLog  {
     assertEquals(bytes.length, read);
     out.close();
     in.close();
-    Path subdir = new Path(dir, "hlogdir");
-    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
+
+    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);
+    
     final int total = 20;
     HLog.Reader reader = null;
 
@@ -255,8 +258,8 @@ public class TestHLog  {
       // gives you EOFE.
       wal.sync();
       // Open a Reader.
-      Path walPath = wal.computeFilename();
-      reader = HLog.getReader(fs, walPath, conf);
+      Path walPath = ((FSHLog) wal).computeFilename();
+      reader = HLogFactory.createReader(fs, walPath, conf);
       int count = 0;
       HLog.Entry entry = new HLog.Entry();
       while ((entry = reader.next(entry)) != null) count++;
@@ -269,14 +272,14 @@ public class TestHLog  {
         kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
         wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
       }
-      reader = HLog.getReader(fs, walPath, conf);
+      reader = HLogFactory.createReader(fs, walPath, conf);
       count = 0;
       while((entry = reader.next(entry)) != null) count++;
       assertTrue(count >= total);
       reader.close();
       // If I sync, should see double the edits.
       wal.sync();
-      reader = HLog.getReader(fs, walPath, conf);
+      reader = HLogFactory.createReader(fs, walPath, conf);
       count = 0;
       while((entry = reader.next(entry)) != null) count++;
       assertEquals(total * 2, count);
@@ -290,14 +293,14 @@ public class TestHLog  {
       }
       // Now I should have written out lots of blocks.  Sync then read.
       wal.sync();
-      reader = HLog.getReader(fs, walPath, conf);
+      reader = HLogFactory.createReader(fs, walPath, conf);
       count = 0;
       while((entry = reader.next(entry)) != null) count++;
       assertEquals(total * 3, count);
       reader.close();
       // Close it and ensure that closed, Reader gets right length also.
       wal.close();
-      reader = HLog.getReader(fs, walPath, conf);
+      reader = HLogFactory.createReader(fs, walPath, conf);
       count = 0;
       while((entry = reader.next(entry)) != null) count++;
       assertEquals(total * 3, count);
@@ -320,11 +323,11 @@ public class TestHLog  {
       regionsToSeqids.put(l.toString().getBytes(), l);
     }
     byte [][] regions =
-      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
+      HLogUtil.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
     assertEquals(2, regions.length);
     assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
         Bytes.equals(regions[0], "1".getBytes()));
-    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
+    regions = HLogUtil.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
     int count = 4;
     assertEquals(count, regions.length);
     // Regions returned are not ordered.
@@ -341,7 +344,7 @@ public class TestHLog  {
     assertEquals(howmany, splits.size());
     for (int i = 0; i < splits.size(); i++) {
       LOG.info("Verifying=" + splits.get(i));
-      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
+      HLog.Reader reader = HLogFactory.createReader(fs, splits.get(i), conf);
       try {
         int count = 0;
         String previousRegion = null;
@@ -377,9 +380,9 @@ public class TestHLog  {
     byte [] tableName = Bytes.toBytes(getName());
     HRegionInfo regioninfo = new HRegionInfo(tableName,
              HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
-    Path subdir = new Path(dir, "hlogdir");
-    Path archdir = new Path(dir, "hlogdir_archive");
-    HLog wal = new HLog(fs, subdir, archdir, conf);
+
+    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", 
+        "hlogdir_archive", conf);
     final int total = 20;
 
     HTableDescriptor htd = new HTableDescriptor();
@@ -393,7 +396,7 @@ public class TestHLog  {
     // Now call sync to send the data to HDFS datanodes
     wal.sync();
      int namenodePort = cluster.getNameNodePort();
-    final Path walPath = wal.computeFilename();
+    final Path walPath = ((FSHLog) wal).computeFilename();
     
 
     // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
@@ -477,7 +480,7 @@ public class TestHLog  {
     SequenceFile.Reader reader
       = new SequenceFile.Reader(this.fs, walPath, this.conf);
     int count = 0;
-    HLogKey key = HLog.newKey(conf);
+    HLogKey key = HLogUtil.newKey(conf);
     WALEdit val = new WALEdit();
     while (reader.next(key, val)) {
       count++;
@@ -500,7 +503,8 @@ public class TestHLog  {
     HLog.Reader reader = null;
     HLog log = null;
     try {
-      log = new HLog(fs, dir, oldLogDir, conf);
+      log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
+      
       // Write columns named 1, 2, 3, etc. and then values of single byte
       // 1, 2, 3...
       long timestamp = System.currentTimeMillis();
@@ -520,10 +524,10 @@ public class TestHLog  {
       log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
           info.isMetaRegion());
       log.close();
-      Path filename = log.computeFilename();
+      Path filename = ((FSHLog) log).computeFilename();
       log = null;
       // Now open a reader on the log and assert append worked.
-      reader = HLog.getReader(fs, filename, conf);
+      reader = HLogFactory.createReader(fs, filename, conf);
       // Above we added all columns on a single row so we only read one
       // entry in the below... thats why we have '1'.
       for (int i = 0; i < 1; i++) {
@@ -548,7 +552,7 @@ public class TestHLog  {
         KeyValue kv = val.getKeyValues().get(0);
         assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
         assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
-        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
+        assertEquals(0, Bytes.compareTo(HLogUtil.COMPLETE_CACHE_FLUSH,
           val.getKeyValues().get(0).getValue()));
         System.out.println(key + " " + val);
       }
@@ -571,7 +575,7 @@ public class TestHLog  {
     final byte [] tableName = Bytes.toBytes("tablename");
     final byte [] row = Bytes.toBytes("row");
     Reader reader = null;
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
     try {
       // Write columns named 1, 2, 3, etc. and then values of single byte
       // 1, 2, 3...
@@ -590,10 +594,10 @@ public class TestHLog  {
       long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
       log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
       log.close();
-      Path filename = log.computeFilename();
+      Path filename = ((FSHLog) log).computeFilename();
       log = null;
       // Now open a reader on the log and assert append worked.
-      reader = HLog.getReader(fs, filename, conf);
+      reader = HLogFactory.createReader(fs, filename, conf);
       HLog.Entry entry = reader.next();
       assertEquals(COL_COUNT, entry.getEdit().size());
       int idx = 0;
@@ -616,7 +620,7 @@ public class TestHLog  {
         assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
         assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
         assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
-        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
+        assertEquals(0, Bytes.compareTo(HLogUtil.COMPLETE_CACHE_FLUSH,
           val.getValue()));
         System.out.println(entry.getKey() + " " + val);
       }
@@ -639,7 +643,7 @@ public class TestHLog  {
     final int COL_COUNT = 10;
     final byte [] tableName = Bytes.toBytes("tablename");
     final byte [] row = Bytes.toBytes("row");
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
     try {
       DumbWALActionsListener visitor = new DumbWALActionsListener();
       log.registerWALActionsListener(visitor);
@@ -675,7 +679,8 @@ public class TestHLog  {
     final byte [] tableName = Bytes.toBytes("testLogCleaning");
     final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");
 
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, 
+        getName(), conf);
     try {
       HRegionInfo hri = new HRegionInfo(tableName,
           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
@@ -686,12 +691,12 @@ public class TestHLog  {
       // Before HBASE-3198 it used to delete it
       addEdits(log, hri, tableName, 1);
       log.rollWriter();
-      assertEquals(1, log.getNumLogFiles());
+      assertEquals(1, ((FSHLog) log).getNumLogFiles());
 
       // See if there's anything wrong with more than 1 edit
       addEdits(log, hri, tableName, 2);
       log.rollWriter();
-      assertEquals(2, log.getNumLogFiles());
+      assertEquals(2, ((FSHLog) log).getNumLogFiles());
 
       // Now mix edits from 2 regions, still no flushing
       addEdits(log, hri, tableName, 1);
@@ -699,14 +704,14 @@ public class TestHLog  {
       addEdits(log, hri, tableName, 1);
       addEdits(log, hri2, tableName2, 1);
       log.rollWriter();
-      assertEquals(3, log.getNumLogFiles());
+      assertEquals(3, ((FSHLog) log).getNumLogFiles());
 
       // Flush the first region, we expect to see the first two files getting
       // archived
       long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
       log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
       log.rollWriter();
-      assertEquals(2, log.getNumLogFiles());
+      assertEquals(2, ((FSHLog) log).getNumLogFiles());
 
       // Flush the second region, which removes all the remaining output files
       // since the oldest was completely flushed and the two others only contain
@@ -714,7 +719,7 @@ public class TestHLog  {
       seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
       log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
       log.rollWriter();
-      assertEquals(0, log.getNumLogFiles());
+      assertEquals(0, ((FSHLog) log).getNumLogFiles());
     } finally {
       if (log != null) log.closeAndDelete();
     }
@@ -724,23 +729,23 @@ public class TestHLog  {
   @Test
   public void testGetServerNameFromHLogDirectoryName() throws IOException {
     String hl = conf.get(HConstants.HBASE_DIR) + "/"+
-        HLog.getHLogDirectoryName(new ServerName("hn", 450, 1398).toString());
+        HLogUtil.getHLogDirectoryName(new ServerName("hn", 450, 1398).toString());
 
     // Must not throw exception
-    Assert.assertNull(HLog.getServerNameFromHLogDirectoryName(conf, null));
-    Assert.assertNull(HLog.getServerNameFromHLogDirectoryName(conf,
+    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, null));
+    Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf,
         conf.get(HConstants.HBASE_DIR) + "/"));
-    Assert.assertNull( HLog.getServerNameFromHLogDirectoryName(conf, "") );
-    Assert.assertNull( HLog.getServerNameFromHLogDirectoryName(conf, "                  ") );
-    Assert.assertNull( HLog.getServerNameFromHLogDirectoryName(conf, hl) );
-    Assert.assertNull( HLog.getServerNameFromHLogDirectoryName(conf, hl+"qdf") );
-    Assert.assertNull( HLog.getServerNameFromHLogDirectoryName(conf, "sfqf"+hl+"qdf") );
+    Assert.assertNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, "") );
+    Assert.assertNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, "                  ") );
+    Assert.assertNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, hl) );
+    Assert.assertNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, hl+"qdf") );
+    Assert.assertNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf"+hl+"qdf") );
 
-    Assert.assertNotNull( HLog.getServerNameFromHLogDirectoryName(conf, conf.get(
+    Assert.assertNotNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, conf.get(
         HConstants.HBASE_DIR) +
         "/.logs/localhost,32984,1343316388997/localhost%2C32984%2C1343316388997.1343316390417"
         ));
-    Assert.assertNotNull( HLog.getServerNameFromHLogDirectoryName(conf, hl+"/qdf") );
+    Assert.assertNotNull( HLogUtil.getServerNameFromHLogDirectoryName(conf, hl+"/qdf") );
   }
 
   /**
@@ -749,7 +754,8 @@ public class TestHLog  {
   @Test
   public void testWALCoprocessorLoaded() throws Exception {
     // test to see whether the coprocessor is loaded or not.
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, 
+        getName(), conf);
     try {
       WALCoprocessorHost host = log.getCoprocessorHost();
       Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
(original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
Tue Oct  2 19:29:19 2012
@@ -56,7 +56,7 @@ public class TestHLogMethods {
     Path regiondir = util.getDataTestDir("regiondir");
     fs.delete(regiondir, true);
     fs.mkdirs(regiondir);
-    Path recoverededits = HLog.getRegionDirRecoveredEditsDir(regiondir);
+    Path recoverededits = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
     String first = HLogSplitter.formatRecoveredEditsFileName(-1);
     createFile(fs, recoverededits, first);
     createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(0));
@@ -70,7 +70,10 @@ public class TestHLogMethods {
     createFile(fs, recoverededits, last);
     createFile(fs, recoverededits,
       Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());
-    NavigableSet<Path> files = HLog.getSplitEditFilesSorted(fs, regiondir);
+
+    HLog log = HLogFactory.createHLog(fs, regiondir,
+                                      "dummyLogName", util.getConfiguration());
+    NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
     assertEquals(7, files.size());
     assertEquals(files.pollFirst().getName(), first);
     assertEquals(files.pollLast().getName(), last);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
(original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
Tue Oct  2 19:29:19 2012
@@ -349,7 +349,7 @@ public class TestHLogSplit {
     Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
         Reader.class);
     InstrumentedSequenceFileLogWriter.activateFailure = false;
-    HLog.resetLogReaderClass();
+    HLogFactory.resetLogReaderClass();
 
     try {
     Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
@@ -371,7 +371,7 @@ public class TestHLogSplit {
     } finally {
       conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
           Reader.class);
-      HLog.resetLogReaderClass();
+      HLogFactory.resetLogReaderClass();
     }
   }
 
@@ -382,7 +382,7 @@ public class TestHLogSplit {
     Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
         Reader.class);
     InstrumentedSequenceFileLogWriter.activateFailure = false;
-    HLog.resetLogReaderClass();
+    HLogFactory.resetLogReaderClass();
 
     try {
       conf.setClass("hbase.regionserver.hlog.reader.impl",
@@ -396,7 +396,7 @@ public class TestHLogSplit {
     } finally {
       conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
           Reader.class);
-      HLog.resetLogReaderClass();
+      HLogFactory.resetLogReaderClass();
     }
 
   }
@@ -408,7 +408,7 @@ public class TestHLogSplit {
     Class<?> backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
         Reader.class);
     InstrumentedSequenceFileLogWriter.activateFailure = false;
-    HLog.resetLogReaderClass();
+    HLogFactory.resetLogReaderClass();
 
     try {
       conf.setClass("hbase.regionserver.hlog.reader.impl",
@@ -428,7 +428,7 @@ public class TestHLogSplit {
     } finally {
       conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
           Reader.class);
-      HLog.resetLogReaderClass();
+      HLogFactory.resetLogReaderClass();
     }
 
   }
@@ -455,7 +455,7 @@ public class TestHLogSplit {
     Path splitLog = getLogForRegion(hbaseDir, TABLE_NAME, REGION);
 
     int actualCount = 0;
-    HLog.Reader in = HLog.getReader(fs, splitLog, conf);
+    HLog.Reader in = HLogFactory.createReader(fs, splitLog, conf);
     HLog.Entry entry;
     while ((entry = in.next()) != null) ++actualCount;
     assertEquals(entryCount-1, actualCount);
@@ -840,14 +840,16 @@ public class TestHLogSplit {
     long oldFlushInterval = conf.getLong(F_INTERVAL, 1000);
     conf.setLong(F_INTERVAL, 1000*1000*100);
     HLog log = null;
-    Path thisTestsDir = new Path(hbaseDir, "testLogRollAfterSplitStart");
+    String logName = "testLogRollAfterSplitStart";
+    Path thisTestsDir = new Path(hbaseDir, logName);
 
     try {
       // put some entries in an HLog
       byte [] tableName = Bytes.toBytes(this.getClass().getName());
       HRegionInfo regioninfo = new HRegionInfo(tableName,
           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-      log = new HLog(fs, thisTestsDir, oldLogDir, conf);
+      log = HLogFactory.createHLog(fs, hbaseDir, logName, conf);
+      
       final int total = 20;
       for (int i = 0; i < total; i++) {
         WALEdit kvs = new WALEdit();
@@ -858,7 +860,7 @@ public class TestHLogSplit {
       }
       // Send the data to HDFS datanodes and close the HDFS writer
       log.sync();
-      log.cleanupCurrentWriter(log.getFilenum());
+      ((FSHLog) log).cleanupCurrentWriter(log.getFilenum());
 
       /* code taken from ProcessServerShutdown.process()
        * handles RS shutdowns (as observed by the Master)
@@ -980,7 +982,7 @@ public class TestHLogSplit {
         }
  
         fs.mkdirs(new Path(tableDir, region));
-        HLog.Writer writer = HLog.createWriter(fs,
+        HLog.Writer writer = HLogFactory.createWriter(fs,
             julietLog, conf);
         appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
             ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
@@ -1120,16 +1122,18 @@ public class TestHLogSplit {
     regions.add(regionName);
     generateHLogs(-1);
 
+    final HLog log = HLogFactory.createHLog(fs, regiondir, 
+        regionName, conf);
+
     HLogSplitter logSplitter = new HLogSplitter(
         conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
       protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
       throws IOException {
-        HLog.Writer writer = HLog.createWriter(fs, logfile, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, logfile, conf);
         // After creating writer, simulate region's
         // replayRecoveredEditsIfAny() which gets SplitEditFiles of this
         // region and delete them, excluding files with '.temp' suffix.
-        NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs,
-            regiondir);
+        NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
         if (files != null && !files.isEmpty()) {
           for (Path file : files) {
             if (!this.fs.delete(file, false)) {
@@ -1179,7 +1183,8 @@ public class TestHLogSplit {
     makeRegionDirs(fs, regions);
     fs.mkdirs(hlogDir);
     for (int i = 0; i < writers; i++) {
-      writer[i] = HLog.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), conf);
+      writer[i] = HLogFactory.createWriter(fs, new Path(hlogDir, HLOG_FILE_PREFIX + i), 
+          conf);
       for (int j = 0; j < entries; j++) {
         int prefix = 0;
         for (String region : regions) {
@@ -1198,7 +1203,7 @@ public class TestHLogSplit {
   private Path getLogForRegion(Path rootdir, byte[] table, String region)
   throws IOException {
     Path tdir = HTableDescriptor.getTableDir(rootdir, table);
-    Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
+    Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
       Bytes.toString(region.getBytes())));
     FileStatus [] files = this.fs.listStatus(editsdir);
     assertEquals(1, files.length);
@@ -1283,7 +1288,7 @@ public class TestHLogSplit {
   @SuppressWarnings("unused")
   private void dumpHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
     HLog.Entry entry;
-    HLog.Reader in = HLog.getReader(fs, log, conf);
+    HLog.Reader in = HLogFactory.createReader(fs, log, conf);
     while ((entry = in.next()) != null) {
       System.out.println(entry);
     }
@@ -1291,7 +1296,7 @@ public class TestHLogSplit {
 
   private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException {
     int count = 0;
-    HLog.Reader in = HLog.getReader(fs, log, conf);
+    HLog.Reader in = HLogFactory.createReader(fs, log, conf);
     while (in.next() != null) {
       count++;
     }
@@ -1324,8 +1329,8 @@ public class TestHLogSplit {
 
   private void injectEmptyFile(String suffix, boolean closeFile)
           throws IOException {
-    HLog.Writer writer = HLog.createWriter(
-            fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
+    HLog.Writer writer = HLogFactory.createWriter( 
+        fs, new Path(hlogDir, HLOG_FILE_PREFIX + suffix), conf);
     if (closeFile) writer.close();
   }
 
@@ -1352,10 +1357,10 @@ public class TestHLogSplit {
     for (int i = 0; i < f1.length; i++) {
       // Regions now have a directory named RECOVERED_EDITS_DIR and in here
       // are split edit files. In below presume only 1.
-      Path rd1 = HLog.getRegionDirRecoveredEditsDir(f1[i].getPath());
+      Path rd1 = HLogUtil.getRegionDirRecoveredEditsDir(f1[i].getPath());
       FileStatus[] rd1fs = fs.listStatus(rd1);
       assertEquals(1, rd1fs.length);
-      Path rd2 = HLog.getRegionDirRecoveredEditsDir(f2[i].getPath());
+      Path rd2 = HLogUtil.getRegionDirRecoveredEditsDir(f2[i].getPath());
       FileStatus[] rd2fs = fs.listStatus(rd2);
       assertEquals(1, rd2fs.length);
       if (!logsAreEqual(rd1fs[0].getPath(), rd2fs[0].getPath())) {
@@ -1367,8 +1372,8 @@ public class TestHLogSplit {
 
   private boolean logsAreEqual(Path p1, Path p2) throws IOException {
     HLog.Reader in1, in2;
-    in1 = HLog.getReader(fs, p1, conf);
-    in2 = HLog.getReader(fs, p2, conf);
+    in1 = HLogFactory.createReader(fs, p1, conf);
+    in2 = HLogFactory.createReader(fs, p2, conf);
     HLog.Entry entry1;
     HLog.Entry entry2;
     while ((entry1 = in1.next()) != null) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
(original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
Tue Oct  2 19:29:19 2012
@@ -138,7 +138,7 @@ public class TestLogRollAbort {
     HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
     HLog log = server.getWAL();
 
-    assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
+    assertTrue("Need HDFS-826 for this test", ((FSHLog) log).canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
     assertTrue("Need append support for this test",
         FSUtils.isAppendSupported(TEST_UTIL.getConfiguration()));
@@ -156,13 +156,13 @@ public class TestLogRollAbort {
     dfsCluster.restartDataNodes();
     LOG.info("Restarted datanodes");
 
-    assertTrue("Should have an outstanding WAL edit", log.hasDeferredEntries());
+    assertTrue("Should have an outstanding WAL edit", ((FSHLog) log).hasDeferredEntries());
     try {
       log.rollWriter(true);
       fail("Log roll should have triggered FailedLogCloseException");
     } catch (FailedLogCloseException flce) {
       assertTrue("Should have deferred flush log edits outstanding",
-          log.hasDeferredEntries());
+          ((FSHLog) log).hasDeferredEntries());
     }
   }
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
(original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
Tue Oct  2 19:29:19 2012
@@ -192,7 +192,7 @@ public class TestLogRolling  {
   public void testLogRolling() throws FailedLogCloseException, IOException {
     this.tableName = getName();
       startAndWriteData();
-      LOG.info("after writing there are " + log.getNumLogFiles() + " log files");
+      LOG.info("after writing there are " + ((FSHLog) log).getNumLogFiles() + " log files");
 
       // flush all regions
 
@@ -205,9 +205,9 @@ public class TestLogRolling  {
       // Now roll the log
       log.rollWriter();
 
-      int count = log.getNumLogFiles();
+      int count = ((FSHLog) log).getNumLogFiles();
       LOG.info("after flushing all regions and rolling logs there are " +
-          log.getNumLogFiles() + " log files");
+                                      ((FSHLog) log).getNumLogFiles() + " log files");
       assertTrue(("actual count: " + count), count <= 2);
   }
 
@@ -268,7 +268,7 @@ public class TestLogRolling  {
    */
   DatanodeInfo[] getPipeline(HLog log) throws IllegalArgumentException,
       IllegalAccessException, InvocationTargetException {
-    OutputStream stm = log.getOutputStream();
+    OutputStream stm = ((FSHLog) log).getOutputStream();
     Method getPipeline = null;
     for (Method m : stm.getClass().getDeclaredMethods()) {
       if (m.getName().endsWith("getPipeline")) {
@@ -315,7 +315,7 @@ public class TestLogRolling  {
     server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
     this.log = server.getWAL();
 
-    assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
+    assertTrue("Need HDFS-826 for this test", ((FSHLog) log).canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
     assertTrue("Need append support for this test", FSUtils
         .isAppendSupported(TEST_UTIL.getConfiguration()));
@@ -342,12 +342,12 @@ public class TestLogRolling  {
     writeData(table, 2);
 
     long curTime = System.currentTimeMillis();
-    long oldFilenum = log.getFilenum();
+    long oldFilenum = ((FSHLog) log).getFilenum();
     assertTrue("Log should have a timestamp older than now",
         curTime > oldFilenum && oldFilenum != -1);
 
     assertTrue("The log shouldn't have rolled yet",
-      oldFilenum == log.getFilenum());
+      oldFilenum == ((FSHLog) log).getFilenum());
     final DatanodeInfo[] pipeline = getPipeline(log);
     assertTrue(pipeline.length == fs.getDefaultReplication());
 
@@ -357,7 +357,7 @@ public class TestLogRolling  {
 
     // this write should succeed, but trigger a log roll
     writeData(table, 2);
-    long newFilenum = log.getFilenum();
+    long newFilenum = ((FSHLog) log).getFilenum();
 
     assertTrue("Missing datanode should've triggered a log roll",
         newFilenum > oldFilenum && newFilenum > curTime);
@@ -365,7 +365,7 @@ public class TestLogRolling  {
     // write some more log data (this should use a new hdfs_out)
     writeData(table, 3);
     assertTrue("The log should not roll again.",
-      log.getFilenum() == newFilenum);
+      ((FSHLog) log).getFilenum() == newFilenum);
     // kill another datanode in the pipeline, so the replicas will be lower than
     // the configured value 2.
     assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null);
@@ -382,8 +382,8 @@ public class TestLogRolling  {
     log.rollWriter(true);
     batchWriteAndWait(table, 13, true, 10000);
     assertTrue("New log file should have the default replication instead of " +
-      log.getLogReplication(),
-      log.getLogReplication() == fs.getDefaultReplication());
+      ((FSHLog) log).getLogReplication(),
+      ((FSHLog) log).getLogReplication() == fs.getDefaultReplication());
     assertTrue("LowReplication Roller should've been enabled",
         log.isLowReplicationRollEnabled());
   }
@@ -417,7 +417,7 @@ public class TestLogRolling  {
     this.log = server.getWAL();
     final List<Path> paths = new ArrayList<Path>();
     final List<Integer> preLogRolledCalled = new ArrayList<Integer>();
-    paths.add(log.computeFilename());
+    paths.add(((FSHLog) log).computeFilename());
     log.registerWALActionsListener(new WALActionsListener() {
       @Override
       public void preLogRoll(Path oldFile, Path newFile)  {
@@ -444,7 +444,7 @@ public class TestLogRolling  {
           WALEdit logEdit) {}
     });
 
-    assertTrue("Need HDFS-826 for this test", log.canGetCurReplicas());
+    assertTrue("Need HDFS-826 for this test", ((FSHLog) log).canGetCurReplicas());
     // don't run this test without append support (HDFS-200 & HDFS-142)
     assertTrue("Need append support for this test", FSUtils
         .isAppendSupported(TEST_UTIL.getConfiguration()));
@@ -498,7 +498,8 @@ public class TestLogRolling  {
       LOG.debug("Reading HLog "+FSUtils.getPath(p));
       HLog.Reader reader = null;
       try {
-        reader = HLog.getReader(fs, p, TEST_UTIL.getConfiguration());
+        reader = HLogFactory.createReader(fs, p, 
+            TEST_UTIL.getConfiguration());
         HLog.Entry entry;
         while ((entry = reader.next()) != null) {
           LOG.debug("#"+entry.getKey().getLogSeqNum()+": "+entry.getEdit().getKeyValues());

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java Tue Oct  2 19:29:19 2012
@@ -53,8 +53,9 @@ public class TestLogRollingNoCluster {
   public void testContendedLogRolling() throws IOException, InterruptedException {
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
     Path dir = TEST_UTIL.getDataTestDir();
-    HLog wal = new HLog(fs, new Path(dir, "logs"), new Path(dir, "oldlogs"),
+    HLog wal = HLogFactory.createHLog(fs, dir, "logs",
       TEST_UTIL.getConfiguration());
+    
     Appender [] appenders = null;
 
     final int count = THREAD_COUNT;
@@ -113,7 +114,7 @@ public class TestLogRollingNoCluster {
         for (int i = 0; i < this.count; i++) {
           long now = System.currentTimeMillis();
           // Roll every ten edits if the log has anything in it.
-          if (i % 10 == 0 && this.wal.getNumEntries() > 0) {
+          if (i % 10 == 0 && ((FSHLog) this.wal).getNumEntries() > 0) {
             this.wal.rollWriter();
           }
           WALEdit edit = new WALEdit();
@@ -136,4 +137,7 @@ public class TestLogRollingNoCluster {
     }
   }
 
+  //@org.junit.Rule
+  //public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+  //  new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
 }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java Tue Oct  2 19:29:19 2012
@@ -50,6 +50,7 @@ public class TestWALActionsListener {
   private static FileSystem fs;
   private static Path oldLogDir;
   private static Path logDir;
+  private static String logName;
   private static Configuration conf;
 
   @BeforeClass
@@ -59,8 +60,9 @@ public class TestWALActionsListener {
     fs = FileSystem.get(conf);
     oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
+    logName = HConstants.HREGION_LOGDIR_NAME;
     logDir = new Path(TEST_UTIL.getDataTestDir(),
-        HConstants.HREGION_LOGDIR_NAME);
+        logName);
   }
 
   @Before
@@ -85,7 +87,8 @@ public class TestWALActionsListener {
     List<WALActionsListener> list = new ArrayList<WALActionsListener>();
     list.add(observer);
     DummyWALActionsListener laterobserver = new DummyWALActionsListener();
-    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, list, null);
+    HLog hlog = HLogFactory.createHLog(fs, TEST_UTIL.getDataTestDir(), logName,
+                                       conf, list, null);
     HRegionInfo hri = new HRegionInfo(SOME_BYTES,
              SOME_BYTES, SOME_BYTES, false);
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Tue Oct  2 19:29:19 2012
@@ -71,6 +71,7 @@ public class TestWALReplay {
   static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
   private Path hbaseRootDir = null;
+  private String logName;
   private Path oldLogDir;
   private Path logDir;
   private FileSystem fs;
@@ -100,7 +101,8 @@ public class TestWALReplay {
     this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
     this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
     this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
+    this.logName = HConstants.HREGION_LOGDIR_NAME;
+    this.logDir = new Path(this.hbaseRootDir, logName);
     if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
       TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
     }
@@ -408,7 +410,7 @@ public class TestWALReplay {
     wal2.sync();
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
-    HBaseTestingUtility.setMaxRecoveryErrorCount(wal2.getOutputStream(), 1);
+    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal2).getOutputStream(), 1);
     final Configuration newConf = HBaseConfiguration.create(this.conf);
     User user = HBaseTestingUtility.getDifferentUser(newConf,
       tableNameStr);
@@ -576,7 +578,7 @@ public class TestWALReplay {
     wal.sync();
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
-    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
+    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
     // Make a new conf and a new fs for the splitter to run on so we can take
     // over old wal.
     final Configuration newConf = HBaseConfiguration.create(this.conf);
@@ -676,11 +678,11 @@ public class TestWALReplay {
         lastestSeqNumber, editCount);
   }
   
-  static class MockHLog extends HLog {
+  static class MockHLog extends FSHLog {
     boolean doCompleteCacheFlush = false;
 
-    public MockHLog(FileSystem fs, Path dir, Path oldLogDir, Configuration conf) throws IOException {
-      super(fs, dir, oldLogDir, conf);
+    public MockHLog(FileSystem fs, Path rootDir, String logName, Configuration conf) throws IOException {
+      super(fs, rootDir, logName, conf);
     }
 
     @Override
@@ -701,10 +703,10 @@ public class TestWALReplay {
   }
   
   private MockHLog createMockWAL(Configuration conf) throws IOException {
-    MockHLog wal = new MockHLog(FileSystem.get(conf), logDir, oldLogDir, conf);
+    MockHLog wal = new MockHLog(FileSystem.get(conf), hbaseRootDir, logName, conf);
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
-    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
+    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
     return wal;
   }
 
@@ -784,10 +786,11 @@ public class TestWALReplay {
    * @throws IOException
    */
   private HLog createWAL(final Configuration c) throws IOException {
-    HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c);
+    HLog wal = HLogFactory.createHLog(FileSystem.get(c), 
+        hbaseRootDir, logName, c);
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
-    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
+    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
     return wal;
   }
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java Tue Oct  2 19:29:19 2012
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -77,7 +79,8 @@ public class TestReplicationSource {
     Path logPath = new Path(logDir, "log");
     if (!FS.exists(logDir)) FS.mkdirs(logDir);
     if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
-    HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
+    HLog.Writer writer = HLogFactory.createWriter(FS, 
+        logPath, conf);
     for(int i = 0; i < 3; i++) {
       byte[] b = Bytes.toBytes(Integer.toString(i));
       KeyValue kv = new KeyValue(b,b,b);
@@ -89,7 +92,8 @@ public class TestReplicationSource {
     }
     writer.close();
 
-    HLog.Reader reader = HLog.getReader(FS, logPath, conf);
+    HLog.Reader reader = HLogFactory.createReader(FS, 
+        logPath, conf);
     HLog.Entry entry = reader.next();
     assertNotNull(entry);
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java Tue Oct  2 19:29:19 2012
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -88,6 +89,8 @@ public class TestReplicationSourceManage
 
   private static FileSystem fs;
 
+  private static String logName;
+
   private static Path oldLogDir;
 
   private static Path logDir;
@@ -122,6 +125,7 @@ public class TestReplicationSourceManage
         HConstants.HREGION_OLDLOGDIR_NAME);
     logDir = new Path(utility.getDataTestDir(),
         HConstants.HREGION_LOGDIR_NAME);
+    logName = HConstants.HREGION_LOGDIR_NAME;
 
     manager.addSource(slaveId);
 
@@ -164,8 +168,8 @@ public class TestReplicationSourceManage
 
     List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
     listeners.add(replication);
-    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, listeners,
-      URLEncoder.encode("regionserver:60020", "UTF8"));
+    HLog hlog = HLogFactory.createHLog(fs, utility.getDataTestDir(), logName,
+        conf, listeners, URLEncoder.encode("regionserver:60020", "UTF8"));
 
     manager.init();
     HTableDescriptor htd = new HTableDescriptor();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java Tue Oct  2 19:29:19 2012
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -262,11 +263,14 @@ public class TestMergeTool extends HBase
     }
 
     // Create a log that we can reuse when we need to open regions
-    Path logPath = new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
-      System.currentTimeMillis());
-    LOG.info("Creating log " + logPath.toString());
-    Path oldLogDir = new Path("/tmp", HConstants.HREGION_OLDLOGDIR_NAME);
-    HLog log = new HLog(this.fs, logPath, oldLogDir, this.conf);
+    Path logPath = new Path("/tmp");
+    String logName = HConstants.HREGION_LOGDIR_NAME + "_"
+      + System.currentTimeMillis();
+    LOG.info("Creating log " + logPath.toString() + "/" + logName);
+
+    HLog log = HLogFactory.createHLog(this.fs, logPath, 
+        logName, this.conf);
+    
     try {
        // Merge Region 0 and Region 1
       HRegion merged = mergeAndVerify("merging regions 0 and 1",



Mime
View raw message