hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r1393126 [3/4] - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/backup/example/ main/java/org/apache/hadoop/hbase/fs/ main/java/org/apache/hadoop/hbase/mapreduce/ main/java/org/apache/hadoop/hbase/master/ main/java/org/...
Date Tue, 02 Oct 2012 19:29:21 GMT
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java?rev=1393126&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java Tue Oct  2 19:29:19 2012
@@ -0,0 +1,324 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
+import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class HLogUtil {
+  static final Log LOG = LogFactory.getLog(HLogUtil.class);
+
+  static byte[] COMPLETE_CACHE_FLUSH;
+  static {
+    try {
+      COMPLETE_CACHE_FLUSH = "HBASE::CACHEFLUSH"
+          .getBytes(HConstants.UTF8_ENCODING);
+    } catch (UnsupportedEncodingException e) {
+      // UTF-8 is always supported by the JVM, so this cannot happen; fail
+      // fast rather than silently leave COMPLETE_CACHE_FLUSH null.
+      throw new RuntimeException("UTF-8 encoding not supported?", e);
+    }
+  }
+
+  /**
+   * @param family
+   * @return true if the column is a meta column
+   */
+  public static boolean isMetaFamily(byte[] family) {
+    return Bytes.equals(HLog.METAFAMILY, family);
+  }
+
+  @SuppressWarnings("unchecked")
+  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
+    return (Class<? extends HLogKey>) conf.getClass(
+        "hbase.regionserver.hlog.keyclass", HLogKey.class);
+  }
+
+  public static HLogKey newKey(Configuration conf) throws IOException {
+    Class<? extends HLogKey> keyClass = getKeyClass(conf);
+    try {
+      return keyClass.newInstance();
+    } catch (InstantiationException e) {
+      throw new IOException("cannot create hlog key", e);
+    } catch (IllegalAccessException e) {
+      throw new IOException("cannot create hlog key", e);
+    }
+  }
+
+  /**
+   * Pattern used to validate an HLog file name
+   */
+  private static final Pattern pattern = Pattern.compile(".*\\.\\d*");
+
+  /**
+   * @param filename
+   *          name of the file to validate
+   * @return <tt>true</tt> if the filename matches an HLog, <tt>false</tt>
+   *         otherwise
+   */
+  public static boolean validateHLogFilename(String filename) {
+    return pattern.matcher(filename).matches();
+  }
+
+  /*
+   * Get a reader for the WAL.
+   * @param fs
+   * @param path
+   * @param conf
+   * @return A WAL reader. Close when done with it.
+   * @throws IOException
+   *
+   * public static HLog.Reader getReader(final FileSystem fs, final Path path,
+   *     Configuration conf) throws IOException {
+   *   try {
+   *     if (logReaderClass == null) {
+   *       logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
+   *           SequenceFileLogReader.class, Reader.class);
+   *     }
+   *     HLog.Reader reader = logReaderClass.newInstance();
+   *     reader.init(fs, path, conf);
+   *     return reader;
+   *   } catch (IOException e) {
+   *     throw e;
+   *   } catch (Exception e) {
+   *     throw new IOException("Cannot get log reader", e);
+   *   }
+   * }
+   *
+   * Get a writer for the WAL.
+   * @param path
+   * @param conf
+   * @return A WAL writer. Close when done with it.
+   * @throws IOException
+   *
+   * public static HLog.Writer createWriter(final FileSystem fs, final Path path,
+   *     Configuration conf) throws IOException {
+   *   try {
+   *     if (logWriterClass == null) {
+   *       logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
+   *           SequenceFileLogWriter.class, Writer.class);
+   *     }
+   *     FSHLog.Writer writer = (FSHLog.Writer) logWriterClass.newInstance();
+   *     writer.init(fs, path, conf);
+   *     return writer;
+   *   } catch (Exception e) {
+   *     throw new IOException("cannot get log writer", e);
+   *   }
+   * }
+   */
+
+  /**
+   * Construct the HLog directory name
+   * 
+   * @param serverName
+   *          Server name formatted as described in {@link ServerName}
+   * @return the relative HLog directory name, e.g.
+   *         <code>.logs/1.example.org,60030,12345</code> if
+   *         <code>serverName</code> passed is
+   *         <code>1.example.org,60030,12345</code>
+   */
+  public static String getHLogDirectoryName(final String serverName) {
+    StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
+    dirName.append("/");
+    dirName.append(serverName);
+    return dirName.toString();
+  }
+
+  /**
+   * @param regiondir
+   *          This regions directory in the filesystem.
+   * @return The directory that holds recovered edits files for the region
+   *         <code>regiondir</code>
+   */
+  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
+    return new Path(regiondir, HLog.RECOVERED_EDITS_DIR);
+  }
+
+  /**
+   * Move aside a bad edits file.
+   * 
+   * @param fs
+   * @param edits
+   *          Edits file to move aside.
+   * @return The name of the moved aside file.
+   * @throws IOException
+   */
+  public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits)
+      throws IOException {
+    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "."
+        + System.currentTimeMillis());
+    if (!fs.rename(edits, moveAsideName)) {
+      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
+    }
+    return moveAsideName;
+  }
+
+  /**
+   * @param path
+   *          the path to analyze. Expected format, if it's in an hlog
+   *          directory: [hbase root directory]/.logs/[ServerName]/[logfile]
+   * @return null if it's not a log file. Otherwise, the ServerName of the
+   *         region server that created this log file.
+   */
+  public static ServerName getServerNameFromHLogDirectoryName(
+      Configuration conf, String path) throws IOException {
+    if (path == null
+        || path.length() <= HConstants.HREGION_LOGDIR_NAME.length()) {
+      return null;
+    }
+
+    if (conf == null) {
+      throw new IllegalArgumentException("parameter conf must be set");
+    }
+
+    final String rootDir = conf.get(HConstants.HBASE_DIR);
+    if (rootDir == null || rootDir.isEmpty()) {
+      throw new IllegalArgumentException(HConstants.HBASE_DIR
+          + " key not found in conf.");
+    }
+
+    final StringBuilder startPathSB = new StringBuilder(rootDir);
+    if (!rootDir.endsWith("/"))
+      startPathSB.append('/');
+    startPathSB.append(HConstants.HREGION_LOGDIR_NAME);
+    if (!HConstants.HREGION_LOGDIR_NAME.endsWith("/"))
+      startPathSB.append('/');
+    final String startPath = startPathSB.toString();
+
+    String fullPath;
+    try {
+      fullPath = FileSystem.get(conf).makeQualified(new Path(path)).toString();
+    } catch (IllegalArgumentException e) {
+      LOG.info("Call to makeQualified failed on " + path + " " + e.getMessage());
+      return null;
+    }
+
+    if (!fullPath.startsWith(startPath)) {
+      return null;
+    }
+
+    final String serverNameAndFile = fullPath.substring(startPath.length());
+
+    if (serverNameAndFile.indexOf('/') < "a,0,0".length()) {
+      // Either it's a file (not a directory) or it's not a ServerName format
+      return null;
+    }
+
+    final String serverName = serverNameAndFile.substring(0,
+        serverNameAndFile.indexOf('/'));
+
+    if (!ServerName.isFullServerName(serverName)) {
+      return null;
+    }
+
+    return ServerName.parseServerName(serverName);
+  }
+
+  /**
+   * Return regions (memstores) that have edits that are equal or less than the
+   * passed <code>oldestWALseqid</code>.
+   * 
+   * @param oldestWALseqid
+   * @param regionsToSeqids
+   *          Encoded region names to sequence ids
+   * @return All regions whose seqid is less than or equal to
+   *         <code>oldestWALseqid</code> (not necessarily in order). Null if no
+   *         regions found.
+   */
+  static byte[][] findMemstoresWithEditsEqualOrOlderThan(
+      final long oldestWALseqid, final Map<byte[], Long> regionsToSeqids) {
+    // This method is static so it can be unit tested more easily.
+    List<byte[]> regions = null;
+    for (Map.Entry<byte[], Long> e : regionsToSeqids.entrySet()) {
+      if (e.getValue().longValue() <= oldestWALseqid) {
+        if (regions == null)
+          regions = new ArrayList<byte[]>();
+        // Key is encoded region name.
+        regions.add(e.getKey());
+      }
+    }
+    return regions == null ? null : regions
+        .toArray(new byte[regions.size()][]);
+  }
+
+  /**
+   * Returns sorted set of edit files made by the WAL splitter, excluding files
+   * with the '.temp' suffix.
+   * 
+   * @param fs
+   * @param regiondir
+   * @return Files in passed <code>regiondir</code> as a sorted set.
+   * @throws IOException
+   */
+  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
+      final Path regiondir) throws IOException {
+    NavigableSet<Path> filesSorted = new TreeSet<Path>();
+    Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
+    if (!fs.exists(editsdir))
+      return filesSorted;
+    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
+      @Override
+      public boolean accept(Path p) {
+        boolean result = false;
+        try {
+          // Return files and only files that match the editfile names pattern.
+          // There can be files in this directory other than edit files.
+          // In particular, on error, we'll move aside the bad edit file giving
+          // it a timestamp suffix. See moveAsideBadEditsFile.
+          Matcher m = HLog.EDITFILES_NAME_PATTERN.matcher(p.getName());
+          result = fs.isFile(p) && m.matches();
+          // Skip any file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
+          // because it means a split-hlog thread is still writing it.
+          if (p.getName().endsWith(HLog.RECOVERED_LOG_TMPFILE_SUFFIX)) {
+            result = false;
+          }
+        } catch (IOException e) {
+          LOG.warn("Failed isFile check on " + p);
+        }
+        return result;
+      }
+    });
+    if (files == null)
+      return filesSorted;
+    for (FileStatus status : files) {
+      filesSorted.add(status.getPath());
+    }
+    return filesSorted;
+  }
+}
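
For orientation, a minimal sketch (not part of this commit) of how callers might use the new HLogUtil helpers; the server name, region directory, and file name below are made-up values:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;

    public class HLogUtilSketch {
      public static void main(String[] args) {
        // ".logs/1.example.org,60030,12345", as in the javadoc above
        String dir = HLogUtil.getHLogDirectoryName("1.example.org,60030,12345");

        // WAL file names end in a dot followed by digits (a timestamp)
        boolean valid = HLogUtil.validateHLogFilename(
            "1.example.org%2C60030%2C12345.1349206159000");

        // the recovered-edits dir lives directly under the region directory
        Path edits = HLogUtil.getRegionDirRecoveredEditsDir(
            new Path("/hbase/test/1028785192"));

        System.out.println(dir + " " + valid + " " + edits);
      }
    }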

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java Tue Oct  2 19:29:19 2012
@@ -214,7 +214,7 @@ public class SequenceFileLogReader imple
     if (e == null) {
       HLogKey key;
       if (keyClass == null) {
-        key = HLog.newKey(conf);
+        key = HLogUtil.newKey(conf);
       } else {
         try {
           key = keyClass.newInstance();
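
The key-class indirection that moved from HLog into HLogUtil is driven by the "hbase.regionserver.hlog.keyclass" setting. A hedged sketch of plugging in a custom key class (MyHLogKey is hypothetical, not from this commit; it only needs a public no-arg constructor for newKey()):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
    import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;

    public class KeyClassSketch {
      // hypothetical key subclass
      public static class MyHLogKey extends HLogKey {
        public MyHLogKey() { super(); }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setClass("hbase.regionserver.hlog.keyclass",
            MyHLogKey.class, HLogKey.class);
        // HLogUtil.newKey reflectively instantiates the configured class
        HLogKey key = HLogUtil.newKey(conf);
        System.out.println(key.getClass());
      }
    }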

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java Tue Oct  2 19:29:19 2012
@@ -40,7 +40,7 @@ import org.apache.hadoop.io.compress.Com
 import org.apache.hadoop.io.compress.DefaultCodec;
 
 /**
- * Implementation of {@link HLog.Writer} that delegates to
+ * Implementation of {@link FSHLog.Writer} that delegates to
  * SequenceFile.Writer.
  */
 @InterfaceAudience.Private
@@ -140,7 +140,7 @@ public class SequenceFileLogWriter imple
     }
 
     if (null == keyClass) {
-      keyClass = HLog.getKeyClass(conf);
+      keyClass = HLogUtil.getKeyClass(conf);
     }
 
     // Create a SF.Writer instance.
@@ -152,7 +152,7 @@ public class SequenceFileLogWriter imple
             Configuration.class, Path.class, Class.class, Class.class,
             Integer.TYPE, Short.TYPE, Long.TYPE, Boolean.TYPE,
             CompressionType.class, CompressionCodec.class, Metadata.class})
-        .invoke(null, new Object[] {fs, conf, path, HLog.getKeyClass(conf),
+        .invoke(null, new Object[] {fs, conf, path, HLogUtil.getKeyClass(conf),
             WALEdit.class,
             Integer.valueOf(fs.getConf().getInt("io.file.buffer.size", 4096)),
             Short.valueOf((short)
@@ -175,7 +175,7 @@ public class SequenceFileLogWriter imple
     if (this.writer == null) {
       LOG.debug("new createWriter -- HADOOP-6840 -- not available");
       this.writer = SequenceFile.createWriter(fs, conf, path,
-        HLog.getKeyClass(conf), WALEdit.class,
+        HLogUtil.getKeyClass(conf), WALEdit.class,
         fs.getConf().getInt("io.file.buffer.size", 4096),
         (short) conf.getInt("hbase.regionserver.hlog.replication",
           fs.getDefaultReplication()),

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java Tue Oct  2 19:29:19 2012
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 
 /**
- * Get notification of {@link HLog}/WAL log events. The invocations are inline
+ * Get notification of {@link FSHLog}/WAL log events. The invocations are inline
  * so make sure your implementation is fast else you'll slow hbase.
  */
 @InterfaceAudience.Private

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java Tue Oct  2 19:29:19 2012
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configurat
 
 /**
  * Implements the coprocessor environment and runtime support for coprocessors
- * loaded within a {@link HLog}.
+ * loaded within a {@link FSHLog}.
  */
 @InterfaceAudience.Private
 public class WALCoprocessorHost
@@ -42,10 +42,10 @@ public class WALCoprocessorHost
   static class WALEnvironment extends CoprocessorHost.Environment
     implements WALCoprocessorEnvironment {
 
-    private HLog wal;
+    private FSHLog wal;
 
     @Override
-    public HLog getWAL() {
+    public FSHLog getWAL() {
       return wal;
     }
 
@@ -59,19 +59,19 @@ public class WALCoprocessorHost
      */
     public WALEnvironment(Class<?> implClass, final Coprocessor impl,
         final int priority, final int seq, final Configuration conf,
-        final HLog hlog) {
+        final FSHLog hlog) {
       super(impl, priority, seq, conf);
       this.wal = hlog;
     }
   }
 
-  HLog wal;
+  FSHLog wal;
   /**
    * Constructor
    * @param log the write ahead log
    * @param conf the configuration
    */
-  public WALCoprocessorHost(final HLog log, final Configuration conf) {
+  public WALCoprocessorHost(final FSHLog log, final Configuration conf) {
     this.wal = log;
     // load system default cp's from configuration.
     loadSystemCoprocessors(conf, WAL_COPROCESSOR_CONF_KEY);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java Tue Oct  2 19:29:19 2012
@@ -52,10 +52,13 @@ import org.apache.hadoop.hbase.client.HC
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
 import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationSourceMetrics;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
@@ -393,7 +396,7 @@ public class ReplicationSource extends T
       this.reader.seek(this.position);
     }
     long startPosition = this.position;
-    HLog.Entry entry = readNextAndSetPosition();
+    HLog.Entry entry = readNextAndSetPosition(); 
     while (entry != null) {
       WALEdit edit = entry.getEdit();
       this.metrics.incrLogEditsRead();
@@ -493,7 +496,8 @@ public class ReplicationSource extends T
           " at " + this.position);
       try {
        this.reader = null;
-       this.reader = HLog.getReader(this.fs, this.currentPath, this.conf);
+       this.reader = HLogFactory.createReader(this.fs, 
+           this.currentPath, this.conf);
       } catch (FileNotFoundException fnfe) {
         if (this.queueRecovered) {
           // We didn't find the log in the archive directory, look if it still
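
This hunk shows the commit's general pattern: opening WALs moves off HLog onto HLogFactory. A minimal, hedged sketch of reading a WAL through the factory (the path is made up; the Reader/Entry calls follow their use elsewhere in this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;

    public class WALReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path log = new Path("/hbase/.logs/rs,60020,12345/rs%2C60020%2C12345.12346");
        HLog.Reader reader = HLogFactory.createReader(fs, log, conf);
        try {
          HLog.Entry entry;
          while ((entry = reader.next()) != null) {
            // each entry pairs an HLogKey with a WALEdit
            System.out.println(entry.getKey() + ": " + entry.getEdit());
          }
        } finally {
          reader.close();
        }
      }
    }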

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Tue Oct  2 19:29:19 2012
@@ -84,8 +84,9 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
@@ -2999,7 +3000,7 @@ public class HBaseFsck {
             // This is special case if a region is left after split
             he.hdfsOnlyEdits = true;
             FileStatus[] subDirs = fs.listStatus(regionDir.getPath());
-            Path ePath = HLog.getRegionDirRecoveredEditsDir(regionDir.getPath());
+            Path ePath = HLogUtil.getRegionDirRecoveredEditsDir(regionDir.getPath());
             for (FileStatus subDir : subDirs) {
               String sdName = subDir.getPath().getName();
               if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Tue Oct  2 19:29:19 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.regionser
  * Helper class for all utilities related to archival/retrieval of HFiles
  */
 public class HFileArchiveUtil {
-  static final String DEFAULT_HFILE_ARCHIVE_DIRECTORY = ".archive";
+  public static final String DEFAULT_HFILE_ARCHIVE_DIRECTORY = ".archive";
 
   private HFileArchiveUtil() {
     // non-external instantiation - util class
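
Making the field public lets code outside the package build archive paths directly, as the rewritten test below does in getArchiveDir(). A trivial hedged sketch (the data directory is a made-up value):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchiveDirSketch {
      public static void main(String[] args) {
        // ".archive" under the test data directory
        Path archiveDir = new Path("/tmp/hbase-test-data",
            HFileArchiveUtil.DEFAULT_HFILE_ARCHIVE_DIRECTORY);
        System.out.println(archiveDir);
      }
    }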

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Tue Oct  2 19:29:19 2012
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 
 /**
  * A non-instantiable class that has a static method capable of compacting
@@ -155,10 +156,9 @@ class HMerge {
           Bytes.toString(tableName)
       );
       this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
-      Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
-          HConstants.HREGION_LOGDIR_NAME);
-      Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
-      this.hlog = new HLog(fs, logdir, oldLogDir, conf);
+      String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
+
+      this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
     }
 
     void process() throws IOException {
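
The HMerge and MetaUtils hunks show the same migration: instead of constructing an HLog from explicit log and old-log directories, callers hand HLogFactory a root directory plus a log name. A hedged before/after sketch (paths are made up; close() is assumed to behave as it did before the split):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;

    public class CreateHLogSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = new Path("/hbase");
        String logName = "sketch_" + System.currentTimeMillis();

        // before: new HLog(fs, new Path(rootdir, logName),
        //     new Path(rootdir, HConstants.HREGION_OLDLOGDIR_NAME), conf);
        // after: the factory derives both directories itself
        HLog log = HLogFactory.createHLog(fs, rootdir, logName, conf);
        try {
          // ... append edits, roll, etc. ...
        } finally {
          log.close();
        }
      }
    }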

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java Tue Oct  2 19:29:19 2012
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 
 /**
  * Contains utility methods for manipulating HBase meta tables.
@@ -97,11 +98,10 @@ public class MetaUtils {
    */
   public synchronized HLog getLog() throws IOException {
     if (this.log == null) {
-      Path logdir = new Path(this.fs.getHomeDirectory(),
-          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
-      Path oldLogDir = new Path(this.fs.getHomeDirectory(),
-          HConstants.HREGION_OLDLOGDIR_NAME);
-      this.log = new HLog(this.fs, logdir, oldLogDir, this.conf);
+      String logName = 
+          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
+      this.log = HLogFactory.createHLog(this.fs, this.fs.getHomeDirectory(),
+                                        logName, this.conf);
     }
     return this.log;
   }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java Tue Oct  2 19:29:19 2012
@@ -19,45 +19,49 @@ package org.apache.hadoop.hbase.backup.e
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
-import org.apache.hadoop.hbase.regionserver.CheckedArchivingHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.StoppableImplementation;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 /**
  * Spin up a small cluster and check that the hfiles of region are properly long-term archived as
  * specified via the {@link ZKTableArchiveClient}.
  */
-@Category(LargeTests.class)
+@Category(MediumTests.class)
 public class TestZooKeeperTableArchiveClient {
 
   private static final Log LOG = LogFactory.getLog(TestZooKeeperTableArchiveClient.class);
@@ -65,10 +69,8 @@ public class TestZooKeeperTableArchiveCl
   private static final String STRING_TABLE_NAME = "test";
   private static final byte[] TEST_FAM = Bytes.toBytes("fam");
   private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
-  private static final int numRS = 2;
-  private static final int maxTries = 5;
-  private static final long ttl = 1000;
   private static ZKTableArchiveClient archivingClient;
+  private final List<Path> toCleanup = new ArrayList<Path>();
 
   /**
    * Setup the config for the cluster
@@ -76,44 +78,35 @@ public class TestZooKeeperTableArchiveCl
   @BeforeClass
   public static void setupCluster() throws Exception {
     setupConf(UTIL.getConfiguration());
-    UTIL.startMiniCluster(numRS);
+    UTIL.startMiniZKCluster();
     archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), UTIL.getHBaseAdmin()
         .getConnection());
+    // make hfile archiving node so we can archive files
+    ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher();
+    String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher);
+    ZKUtil.createWithParents(watcher, archivingZNode);
   }
 
   private static void setupConf(Configuration conf) {
-    // disable the ui
-    conf.setInt("hbase.regionsever.info.port", -1);
-    // change the flush size to a small amount, regulating number of store files
-    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
-    // so make sure we get a compaction when doing a load, but keep around some
-    // files in the store
-    conf.setInt("hbase.hstore.compaction.min", 10);
-    conf.setInt("hbase.hstore.compactionThreshold", 10);
-    // block writes if we get to 12 store files
-    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // only compact with 3 files
+    conf.setInt("hbase.hstore.compaction.min", 3);
     // drop the number of attempts for the hbase admin
     conf.setInt("hbase.client.retries.number", 1);
-    // set the ttl on the hfiles
-    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
-    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
-      CheckedArchivingHFileCleaner.class.getCanonicalName(),
-      LongTermArchivingHFileCleaner.class.getCanonicalName());
-  }
-
-  @Before
-  public void setup() throws Exception {
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
   }
 
   @After
   public void tearDown() throws Exception {
-    UTIL.deleteTable(TABLE_NAME);
-    // and cleanup the archive directory
     try {
-      UTIL.getTestFileSystem().delete(new Path(UTIL.getDefaultRootDirPath(), ".archive"), true);
+      FileSystem fs = UTIL.getTestFileSystem();
+      // cleanup each of the files/directories registered
+      for (Path file : toCleanup) {
+      // remove the table and archive directories
+        FSUtils.delete(fs, file, true);
+      }
     } catch (IOException e) {
       LOG.warn("Failure to delete archive directory", e);
+    } finally {
+      toCleanup.clear();
     }
     // make sure that backups are off for all tables
     archivingClient.disableHFileBackup();
@@ -122,7 +115,7 @@ public class TestZooKeeperTableArchiveCl
   @AfterClass
   public static void cleanupTest() throws Exception {
     try {
-      UTIL.shutdownMiniCluster();
+      UTIL.shutdownMiniZKCluster();
     } catch (Exception e) {
       LOG.warn("problem shutting down cluster", e);
     }
@@ -156,227 +149,263 @@ public class TestZooKeeperTableArchiveCl
 
   @Test
   public void testArchivingOnSingleTable() throws Exception {
-    // turn on hfile retention
-    LOG.debug("----Starting archiving");
-    archivingClient.enableHFileBackupAsync(TABLE_NAME);
-    assertTrue("Archving didn't get turned on", archivingClient
-        .getArchivingEnabled(TABLE_NAME));
+    createArchiveDirectory();
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path archiveDir = getArchiveDir();
+    Path tableDir = getTableDir(STRING_TABLE_NAME);
+    toCleanup.add(archiveDir);
+    toCleanup.add(tableDir);
 
-    // get the RS and region serving our table
-    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
-    // make sure we only have 1 region serving this table
-    assertEquals(1, servingRegions.size());
-    HRegion region = servingRegions.get(0);
-
-    // get the parent RS and monitor
-    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
-    FileSystem fs = hrs.getFileSystem();
-
-    // put some data on the region
-    LOG.debug("-------Loading table");
-    UTIL.loadRegion(region, TEST_FAM);
-    loadAndCompact(region);
-
-    // check that we actually have some store files that were archived
-    Store store = region.getStore(TEST_FAM);
-    Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
-      region, store);
-
-    // check to make sure we archived some files
-    assertTrue("Didn't create a store archive directory", fs.exists(storeArchiveDir));
-    assertTrue("No files in the store archive",
-      FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
-
-    // and then put some non-tables files in the archive
     Configuration conf = UTIL.getConfiguration();
-    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
-    // write a tmp file to the archive dir
-    Path tmpFile = new Path(archiveDir, "toDelete");
-    FSDataOutputStream out = fs.create(tmpFile);
-    out.write(1);
-    out.close();
-
-    assertTrue(fs.exists(tmpFile));
-    // make sure we wait long enough for the files to expire
-    Thread.sleep(ttl);
-
-    // print currrent state for comparison
-    FSUtils.logFileSystemState(fs, archiveDir, LOG);
-
-    // ensure there are no archived files after waiting for a timeout
-    ensureHFileCleanersRun();
-
-    // check to make sure the right things get deleted
-    assertTrue("Store archive got deleted", fs.exists(storeArchiveDir));
-    assertTrue("Archived HFiles got deleted",
-      FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
-
-    assertFalse(
-      "Tmp file (non-table archive file) didn't " + "get deleted, archive dir: "
-          + fs.listStatus(archiveDir), fs.exists(tmpFile));
-    LOG.debug("Turning off hfile backup.");
-    // stop archiving the table
-    archivingClient.disableHFileBackup();
-    LOG.debug("Deleting table from archive.");
-    // now remove the archived table
-    Path primaryTable = new Path(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()),
-        STRING_TABLE_NAME);
-    fs.delete(primaryTable, true);
-    LOG.debug("Deleted primary table, waiting for file cleaners to run");
-    // and make sure the archive directory is retained after a cleanup
-    // have to do this manually since delegates aren't run if there isn't any files in the archive
-    // dir to cleanup
-    Thread.sleep(ttl);
-    UTIL.getHBaseCluster().getMaster().getHFileCleaner().triggerNow();
-    Thread.sleep(ttl);
-    LOG.debug("File cleaners done, checking results.");
+    // setup the delegate
+    Stoppable stop = new StoppableImplementation();
+    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
+    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
+    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
+
+    // create the region
+    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
+    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
+
+    loadFlushAndCompact(region, TEST_FAM);
+
+    // get the current hfiles in the archive directory
+    List<Path> files = getAllFiles(fs, archiveDir);
+    if (files == null) {
+      FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
+      throw new RuntimeException("Didn't archive any files!");
+    }
+    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
+
+    runCleaner(cleaner, finished, stop);
+
+    // know the cleaner ran, so now check all the files again to make sure they are still there
+    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
+    assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
+
     // but we still have the archive directory
     assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
   }
 
   /**
-   * Make sure all the {@link HFileCleaner} run.
-   * <p>
-   * Blocking operation up to 3x ttl
-   * @throws InterruptedException
-   */
-  private void ensureHFileCleanersRun() throws InterruptedException {
-    LOG.debug("Waiting on archive cleaners to run...");
-    CheckedArchivingHFileCleaner.resetCheck();
-    do {
-      UTIL.getHBaseCluster().getMaster().getHFileCleaner().triggerNow();
-      LOG.debug("Triggered, sleeping an amount until we can pass the check.");
-      Thread.sleep(ttl);
-    } while (!CheckedArchivingHFileCleaner.getChecked());
-  }
-
-  /**
    * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
-   * @throws Exception
+   * @throws Exception on failure
    */
   @Test
   public void testMultipleTables() throws Exception {
-    archivingClient.enableHFileBackupAsync(TABLE_NAME);
-    assertTrue("Archving didn't get turned on", archivingClient
-        .getArchivingEnabled(TABLE_NAME));
+    createArchiveDirectory();
+    String otherTable = "otherTable";
+
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path archiveDir = getArchiveDir();
+    Path tableDir = getTableDir(STRING_TABLE_NAME);
+    Path otherTableDir = getTableDir(otherTable);
+
+    // register cleanup for the created directories
+    toCleanup.add(archiveDir);
+    toCleanup.add(tableDir);
+    toCleanup.add(otherTableDir);
+    Configuration conf = UTIL.getConfiguration();
+    // setup the delegate
+    Stoppable stop = new StoppableImplementation();
+    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
+    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
+    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
+
+    // create the region
+    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
+    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
+    loadFlushAndCompact(region, TEST_FAM);
 
     // create the another table that we don't archive
-    String otherTable = "otherTable";
-    UTIL.createTable(Bytes.toBytes(otherTable), TEST_FAM);
+    hcd = new HColumnDescriptor(TEST_FAM);
+    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
+    loadFlushAndCompact(otherRegion, TEST_FAM);
+
+    // get the current hfiles in the archive directory
+    List<Path> files = getAllFiles(fs, archiveDir);
+    if (files == null) {
+      FSUtils.logFileSystemState(fs, archiveDir, LOG);
+      throw new RuntimeException("Didn't load archive any files!");
+    }
+
+    // make sure we have files from both tables
+    int initialCountForPrimary = 0;
+    int initialCountForOtherTable = 0;
+    for (Path file : files) {
+      String tableName = file.getParent().getParent().getParent().getName();
+      // check to which table this file belongs
+      if (tableName.equals(otherTable)) initialCountForOtherTable++;
+      else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
+    }
 
-    // get the parent RS and monitor
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
+    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
+
+    // run the cleaners
+    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
+    // run the cleaner
+    cleaner.start();
+    // wait for the cleaner to check all the files
+    finished.await();
+    // stop the cleaner
+    stop.stop("");
+
+    // know the cleaner ran, so now check all the files again to make sure they are still there
+    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
+    int archivedForPrimary = 0;
+    for (Path file : archivedFiles) {
+      String tableName = file.getParent().getParent().getParent().getName();
+      // ensure we don't have files from the non-archived table
+      assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
+      if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
+    }
+
+    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
+      archivedForPrimary);
 
-    // put data in the filesystem of the first table
-    LOG.debug("Loading data into:" + STRING_TABLE_NAME);
-    loadAndCompact(STRING_TABLE_NAME);
-
-    // and some data in the other table
-    LOG.debug("Loading data into:" + otherTable);
-    loadAndCompact(otherTable);
-
-    // make sure we wait long enough for the other table's files to expire
-    ensureHFileCleanersRun();
-
-    // check to make sure the right things get deleted
-    Path primaryStoreArchive = HFileArchiveTestingUtil.getStoreArchivePath(UTIL, STRING_TABLE_NAME,
-      TEST_FAM);
-    Path otherStoreArchive = HFileArchiveTestingUtil
-        .getStoreArchivePath(UTIL, otherTable, TEST_FAM);
-    // make sure the primary store doesn't have any files
-    assertTrue("Store archive got deleted", fs.exists(primaryStoreArchive));
-    assertTrue("Archived HFiles got deleted",
-      FSUtils.listStatus(fs, primaryStoreArchive, null).length > 0);
-    FileStatus[] otherArchiveFiles = FSUtils.listStatus(fs, otherStoreArchive, null);
-    assertNull("Archived HFiles (" + otherStoreArchive
-        + ") should have gotten deleted, but didn't, remaining files:"
-        + getPaths(otherArchiveFiles), otherArchiveFiles);
-    // sleep again to make sure we the other table gets cleaned up
-    ensureHFileCleanersRun();
-    // first pass removes the store archive
-    assertFalse(fs.exists(otherStoreArchive));
-    // second pass removes the region
-    ensureHFileCleanersRun();
-    Path parent = otherStoreArchive.getParent();
-    assertFalse(fs.exists(parent));
-    // third pass remove the table
-    ensureHFileCleanersRun();
-    parent = otherStoreArchive.getParent();
-    assertFalse(fs.exists(parent));
     // but we still have the archive directory
-    assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
+    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
+  }
+
 
-    FSUtils.logFileSystemState(fs, HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()), LOG);
-    UTIL.deleteTable(Bytes.toBytes(otherTable));
+  private void createArchiveDirectory() throws IOException {
+    //create the archive and test directory
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path archiveDir = getArchiveDir();
+    fs.mkdirs(archiveDir);
   }
 
-  private List<Path> getPaths(FileStatus[] files) {
-    if (files == null || files.length == 0) return null;
+  private Path getArchiveDir() throws IOException {
+    return new Path(UTIL.getDataTestDir(), HFileArchiveUtil.DEFAULT_HFILE_ARCHIVE_DIRECTORY);
+  }
 
-    List<Path> paths = new ArrayList<Path>(files.length);
-    for (FileStatus file : files) {
-      paths.add(file.getPath());
+  private Path getTableDir(String tableName) throws IOException {
+    Path testDataDir = UTIL.getDataTestDir();
+    FSUtils.setRootDir(UTIL.getConfiguration(), testDataDir);
+    return new Path(testDataDir, tableName);
+  }
+
+  private HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs, Path archiveDir,
+      Stoppable stop) {
+    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+      LongTermArchivingHFileCleaner.class.getCanonicalName());
+    return new HFileCleaner(1000, stop, conf, fs, archiveDir);
+  }
+
+  /**
+   * Start archiving table for given hfile cleaner
+   * @param tableName table to archive
+   * @param cleaner cleaner to check to make sure change propagated
+   * @return underlying {@link LongTermArchivingHFileCleaner} that is managing archiving
+   * @throws IOException on failure
+   * @throws KeeperException on failure
+   */
+  private List<BaseHFileCleanerDelegate> turnOnArchiving(String tableName, HFileCleaner cleaner)
+      throws IOException, KeeperException {
+    // turn on hfile retention
+    LOG.debug("----Starting archiving for table:" + tableName);
+    archivingClient.enableHFileBackupAsync(Bytes.toBytes(tableName));
+    assertTrue("Archving didn't get turned on", archivingClient.getArchivingEnabled(tableName));
+
+    // wait for the archiver to get the notification
+    List<BaseHFileCleanerDelegate> cleaners = cleaner.getDelegatesForTesting();
+    LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
+    while (!delegate.archiveTracker.keepHFiles(tableName)) {
+      // spin until propagation - should be fast
     }
-    return paths;
+    return cleaners;
   }
 
-  private void loadAndCompact(String tableName) throws Exception {
-    byte[] table = Bytes.toBytes(tableName);
-    // get the RS and region serving our table
-    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(table);
-    // make sure we only have 1 region serving this table
-    assertEquals(1, servingRegions.size());
-    HRegion region = servingRegions.get(0);
-
-    // get the parent RS and monitor
-    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(table);
-    FileSystem fs = hrs.getFileSystem();
-
-    // put some data on the region
-    LOG.debug("-------Loading table");
-    UTIL.loadRegion(region, TEST_FAM);
-    loadAndCompact(region);
-
-    // check that we actually have some store files that were archived
-    Store store = region.getStore(TEST_FAM);
-    Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
-      region, store);
-
-    // check to make sure we archived some files
-    assertTrue("Didn't create a store archive directory", fs.exists(storeArchiveDir));
-    assertTrue("No files in the store archive",
-      FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
+  /**
+   * Spy on the {@link LongTermArchivingHFileCleaner} to ensure we can catch when the cleaner has
+   * seen all the files
+   * @return a {@link CountDownLatch} to wait on that releases when the cleaner has been called at
+   *         least the expected number of times.
+   */
+  private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner cleaner,
+      List<BaseHFileCleanerDelegate> cleaners, final int expected) {
+    // replace the cleaner with one that we can check
+    BaseHFileCleanerDelegate delegateSpy = Mockito.spy(cleaner);
+    final int[] counter = new int[] { 0 };
+    final CountDownLatch finished = new CountDownLatch(1);
+    Mockito.doAnswer(new Answer<Boolean>() {
+
+      @Override
+      public Boolean answer(InvocationOnMock invocation) throws Throwable {
+        counter[0]++;
+        LOG.debug("(" + counter[0] + "/" + expected + ") Mocking call to isFileDeletable");
+        if (counter[0] > expected) finished.countDown();
+        return (Boolean) invocation.callRealMethod();
 
-    // wait for the compactions to finish
-    region.waitForFlushesAndCompactions();
+      }
+    }).when(delegateSpy).isFileDeletable(Mockito.any(Path.class));
+    cleaners.set(0, delegateSpy);
+
+    return finished;
   }
 
   /**
-   * Load the given region and then ensure that it compacts some files
+   * Get all the files (non-directory entries) in the file system under the passed directory
+   * @param dir directory to investigate
+   * @return all files under the directory
    */
-  private void loadAndCompact(HRegion region) throws Exception {
-    int tries = 0;
-    Exception last = null;
-    while (tries++ <= maxTries) {
-      try {
-        // load the region with data
-        UTIL.loadRegion(region, TEST_FAM);
-        // and then trigger a compaction to be sure we try to archive
-        compactRegion(region, TEST_FAM);
-        return;
-      } catch (Exception e) {
-        // keep this around for if we fail later
-        last = e;
+  private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
+    FileStatus[] files = FSUtils.listStatus(fs, dir, null);
+    if (files == null) return null;
+
+    List<Path> allFiles = new ArrayList<Path>();
+    for (FileStatus file : files) {
+      if (file.isDir()) {
+        List<Path> subFiles = getAllFiles(fs, file.getPath());
+        if (subFiles != null) allFiles.addAll(subFiles);
+        continue;
       }
+      allFiles.add(file.getPath());
     }
-    throw last;
+    return allFiles;
+  }
+
+  private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
+    // create two hfiles in the region
+    createHFileInRegion(region, family);
+    createHFileInRegion(region, family);
+
+    Store s = region.getStore(family);
+    int count = s.getStorefilesCount();
+    assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
+      count >= 2);
+
+    // compact the two files into one file to get files in the archive
+    LOG.debug("Compacting stores");
+    region.compactStores(true);
+  }
+
+  /**
+   * Create a new hfile in the passed region
+   * @param region region to operate on
+   * @param columnFamily family for which to add data
+   * @throws IOException
+   */
+  private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
+    // put one row in the region
+    Put p = new Put(Bytes.toBytes("row"));
+    p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
+    region.put(p);
+    // flush the region to make a store file
+    region.flushcache();
   }
 
   /**
-   * Compact all the store files in a given region.
+   * Run the given cleaner, wait (on the latch from
+   * {@link #setupCleanerWatching}) until it has checked all the files, then
+   * stop it.
+   * @param cleaner the cleaner to run
    */
-  private void compactRegion(HRegion region, byte[] family) throws IOException {
-    Store store = region.getStores().get(TEST_FAM);
-    store.compactRecentForTesting(store.getStorefiles().size());
+  private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
+      throws InterruptedException {
+    // run the cleaner
+    cleaner.start();
+    // wait for the cleaner to check all the files
+    finished.await();
+    // stop the cleaner
+    stop.stop("");
   }
 }
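
setupCleanerWatching above combines a Mockito spy with a CountDownLatch so the test can block until the delegate has been asked about every file. The generic shape of that pattern, as a hedged sketch (Checker and isDeletable are hypothetical stand-ins for the delegate and its method):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.mockito.Mockito;
    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class SpyLatchSketch {
      // hypothetical collaborator whose calls we want to count
      public static class Checker {
        public boolean isDeletable(String name) { return true; }
      }

      public static void main(String[] args) throws Exception {
        final int expected = 3;
        final AtomicInteger calls = new AtomicInteger();
        final CountDownLatch done = new CountDownLatch(1);

        Checker spy = Mockito.spy(new Checker());
        Mockito.doAnswer(new Answer<Boolean>() {
          @Override
          public Boolean answer(InvocationOnMock invocation) throws Throwable {
            // release the latch once the spy has seen `expected` calls
            if (calls.incrementAndGet() >= expected) done.countDown();
            return (Boolean) invocation.callRealMethod();
          }
        }).when(spy).isDeletable(Mockito.anyString());

        // hand `spy` to the worker in place of the real checker, then wait
        for (int i = 0; i < expected; i++) spy.isDeletable("file-" + i);
        done.await();
      }
    }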

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java Tue Oct  2 19:29:19 2012
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java Tue Oct  2 19:29:19 2012
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -78,6 +79,7 @@ public class TestWALObserver {
   private FileSystem fs;
   private Path dir;
   private Path hbaseRootDir;
+  private String logName;
   private Path oldLogDir;
   private Path logDir;
 
@@ -112,6 +114,7 @@ public class TestWALObserver {
     this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
     this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
     this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
+    this.logName = HConstants.HREGION_LOGDIR_NAME;
 
     if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
       TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
@@ -138,7 +141,8 @@ public class TestWALObserver {
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
 
-    HLog log = new HLog(this.fs, this.dir, this.oldLogDir, this.conf);
+    HLog log = HLogFactory.createHLog(this.fs, hbaseRootDir, 
+        TestWALObserver.class.getName(), this.conf);
     SampleRegionWALObserver cp = getCoprocessor(log);
 
     // TEST_FAMILY[0] shall be removed from WALEdit.
@@ -285,7 +289,8 @@ public class TestWALObserver {
    */
   @Test
   public void testWALObserverLoaded() throws Exception {
-    HLog log = new HLog(fs, dir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseRootDir,
+                                      TestWALObserver.class.getName(), conf);
     assertNotNull(getCoprocessor(log));
   }
 
@@ -357,8 +362,7 @@ public class TestWALObserver {
     return splits.get(0);
   }
   private HLog createWAL(final Configuration c) throws IOException {
-    HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c);
-    return wal;
+    return HLogFactory.createHLog(FileSystem.get(c), hbaseRootDir, logName, c);
   }
   private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
       final byte [] rowName, final byte [] family,

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java Tue Oct  2 19:29:19 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.HT
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -397,7 +398,7 @@ public class TestBlockReorder {
 
       // Check that it will be possible to extract a ServerName from our construction
       Assert.assertNotNull("log= " + pseudoLogFile,
-          HLog.getServerNameFromHLogDirectoryName(dfs.getConf(), pseudoLogFile));
+          HLogUtil.getServerNameFromHLogDirectoryName(dfs.getConf(), pseudoLogFile));
 
       // And check we're doing the right reorder.
       lrb.reorderBlocks(conf, l, pseudoLogFile);

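Static helpers that only inspect paths and server names move from HLog onto the new HLogUtil class, so call sites change mechanically. Sketch of the shape, as used in the assertion above:

    // Before
    ServerName sn = HLog.getServerNameFromHLogDirectoryName(conf, logFile);
    // After
    ServerName sn = HLogUtil.getServerNameFromHLogDirectoryName(conf, logFile);

The same relocation shows up below for getHLogDirectoryName, getRegionDirRecoveredEditsDir, and newKey.
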
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java Tue Oct  2 19:29:19 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.mapreduce.HLogInputFormat.HLogRecordReader;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -64,7 +65,7 @@ public class TestHLogRecordReader {
   private static final byte [] value = Bytes.toBytes("value");
   private static HTableDescriptor htd;
   private static Path logDir;
-  private static Path oldLogDir;
+  private static String logName;
 
   private static String getName() {
     return "TestHLogRecordReader";
@@ -90,8 +91,10 @@ public class TestHLogRecordReader {
     fs = TEST_UTIL.getDFSCluster().getFileSystem();
 
     hbaseDir = TEST_UTIL.createRootDir();
-    logDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
-    oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
+    
+    logName = HConstants.HREGION_LOGDIR_NAME;
+    logDir = new Path(hbaseDir, logName);
+
     htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(family));
   }
@@ -107,7 +110,8 @@ public class TestHLogRecordReader {
    */
   @Test
   public void testPartialRead() throws Exception {
-    HLog log = new HLog(fs, logDir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir,
+                                      logName, conf);
     long ts = System.currentTimeMillis();
     WALEdit edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
@@ -163,7 +167,7 @@ public class TestHLogRecordReader {
    */
   @Test
   public void testHLogRecordReader() throws Exception {
-    HLog log = new HLog(fs, logDir, oldLogDir, conf);
+    HLog log = HLogFactory.createHLog(fs, hbaseDir, logName, conf);
     byte [] value = Bytes.toBytes("value");
     WALEdit edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),

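Here the test also stops tracking oldLogDir entirely: it keeps the log directory name as the single source of truth and derives the Path from it, so the setup and the factory call cannot drift apart. The resulting pattern, condensed from the hunks above:

    String logName = HConstants.HREGION_LOGDIR_NAME;
    Path logDir = new Path(hbaseDir, logName);
    HLog log = HLogFactory.createHLog(fs, hbaseDir, logName, conf);
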
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java Tue Oct  2 19:29:19 2012
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -132,7 +134,7 @@ public class TestDistributedLogSplitting
       regions = ProtobufUtil.getOnlineRegions(hrs);
       if (regions.size() != 0) break;
     }
-    final Path logDir = new Path(rootdir, HLog.getHLogDirectoryName(hrs
+    final Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(hrs
         .getServerName().toString()));
 
     LOG.info("#regions = " + regions.size());
@@ -153,7 +155,7 @@ public class TestDistributedLogSplitting
 
       Path tdir = HTableDescriptor.getTableDir(rootdir, table);
       Path editsdir =
-        HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
+        HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
       LOG.debug("checking edits dir " + editsdir);
       FileStatus[] files = fs.listStatus(editsdir);
       assertEquals(1, files.length);
@@ -185,7 +187,7 @@ public class TestDistributedLogSplitting
     HRegionServer hrs = rsts.get(0).getRegionServer();
     Path rootdir = FSUtils.getRootDir(conf);
     final Path logDir = new Path(rootdir,
-        HLog.getHLogDirectoryName(hrs.getServerName().toString()));
+        HLogUtil.getHLogDirectoryName(hrs.getServerName().toString()));
 
     installTable(new ZooKeeperWatcher(conf, "table-creation", null),
         "table", "family", 40);
@@ -433,7 +435,7 @@ public class TestDistributedLogSplitting
   private int countHLog(Path log, FileSystem fs, Configuration conf)
   throws IOException {
     int count = 0;
-    HLog.Reader in = HLog.getReader(fs, log, conf);
+    HLog.Reader in = HLogFactory.createReader(fs, log, conf);
     while (in.next() != null) {
       count++;
     }

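Reader construction goes through the factory as well, mirroring the createHLog change:

    // Before
    HLog.Reader in = HLog.getReader(fs, log, conf);
    // After
    HLog.Reader in = HLogFactory.createReader(fs, log, conf);

The HLog.Reader interface itself is unchanged here, so the while (in.next() != null) counting loop in countHLog needs no other edits.
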
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java Tue Oct  2 19:29:19 2012
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import org.junit.After;
@@ -157,11 +158,13 @@ public class TestCacheOnWriteInSchema {
 
     // Create a store based on the schema
     Path basedir = new Path(DIR);
-    Path logdir = new Path(DIR+"/logs");
-    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
+    String logName = "logs";
+    Path logdir = new Path(DIR, logName);
     fs.delete(logdir, true);
+
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
+    HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
+    
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
     store = new HStore(basedir, region, hcd, fs, conf);
   }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java Tue Oct  2 19:29:19 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.common.collect.Lists;
@@ -73,7 +74,8 @@ public class TestCompactSelection extend
 
     //Setting up a Store
     Path basedir = new Path(DIR);
-    Path logdir = new Path(DIR+"/logs");
+    String logName = "logs";
+    Path logdir = new Path(DIR, logName);
     Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
     FileSystem fs = FileSystem.get(conf);
@@ -84,7 +86,8 @@ public class TestCompactSelection extend
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
 
-    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
+    HLog hlog = HLogFactory.createHLog(fs, basedir, 
+        logName, conf);
     HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
     HRegion.closeHRegion(region);
     Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));

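One small wrinkle in this file: the hunks keep the oldLogDir local even though the new factory call no longer consumes it, so, at least within the lines shown, it appears dead after this patch. The matching change in TestStore.java further down deletes the equivalent line; presumably this one could go the same way in a follow-up:

    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);  // apparently unused after this patch
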
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Oct  2 19:29:19 2012
@@ -80,7 +80,10 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -214,7 +217,7 @@ public class TestHRegion extends HBaseTe
       FileSystem fs = region.getFilesystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
-      Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
 
       long maxSeqId = 1050;
       long minSeqId = 1000;
@@ -222,7 +225,8 @@ public class TestHRegion extends HBaseTe
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, 
+            recoveredEdits, conf);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
@@ -265,7 +269,7 @@ public class TestHRegion extends HBaseTe
       FileSystem fs = region.getFilesystem();
       byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
-      Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
 
       long maxSeqId = 1050;
       long minSeqId = 1000;
@@ -273,7 +277,8 @@ public class TestHRegion extends HBaseTe
       for (long i = minSeqId; i <= maxSeqId; i += 10) {
         Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
         fs.create(recoveredEdits);
-        HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, 
+            recoveredEdits, conf);
 
         long time = System.nanoTime();
         WALEdit edit = new WALEdit();
@@ -320,7 +325,7 @@ public class TestHRegion extends HBaseTe
       Path regiondir = region.getRegionDir();
       FileSystem fs = region.getFilesystem();
 
-      Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
       for (int i = 1000; i < 1050; i += 10) {
         Path recoveredEdits = new Path(
             recoveredEditsDir, String.format("%019d", i));
@@ -594,8 +599,8 @@ public class TestHRegion extends HBaseTe
     byte[] val = Bytes.toBytes("val");
     this.region = initHRegion(b, getName(), cf);
     try {
-      HLog.getSyncTime(); // clear counter from prior tests
-      assertEquals(0, HLog.getSyncTime().count);
+      HLogMetrics.getSyncTime(); // clear counter from prior tests
+      assertEquals(0, HLogMetrics.getSyncTime().count);
 
       LOG.info("First a batch put with all valid puts");
       final Put[] puts = new Put[10];
@@ -610,7 +615,7 @@ public class TestHRegion extends HBaseTe
         assertEquals(OperationStatusCode.SUCCESS, codes[i]
             .getOperationStatusCode());
       }
-      assertEquals(1, HLog.getSyncTime().count);
+      assertEquals(1, HLogMetrics.getSyncTime().count);
 
       LOG.info("Next a batch put with one invalid family");
       puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
@@ -620,7 +625,7 @@ public class TestHRegion extends HBaseTe
         assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
           OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
       }
-      assertEquals(1, HLog.getSyncTime().count);
+      assertEquals(1, HLogMetrics.getSyncTime().count);
 
       LOG.info("Next a batch put that has to break into two batches to avoid a lock");
       Integer lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
@@ -641,7 +646,7 @@ public class TestHRegion extends HBaseTe
   
       LOG.info("...waiting for put thread to sync first time");
       long startWait = System.currentTimeMillis();
-      while (HLog.getSyncTime().count == 0) {
+      while (HLogMetrics.getSyncTime().count == 0) {
         Thread.sleep(100);
         if (System.currentTimeMillis() - startWait > 10000) {
           fail("Timed out waiting for thread to sync first minibatch");
@@ -652,7 +657,7 @@ public class TestHRegion extends HBaseTe
       LOG.info("...joining on thread");
       ctx.stop();
       LOG.info("...checking that next batch was synced");
-      assertEquals(1, HLog.getSyncTime().count);
+      assertEquals(1, HLogMetrics.getSyncTime().count);
       codes = retFromThread.get();
       for (int i = 0; i < 10; i++) {
         assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
@@ -676,7 +681,7 @@ public class TestHRegion extends HBaseTe
           OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
       }
       // Make sure we didn't do an extra batch
-      assertEquals(1, HLog.getSyncTime().count);
+      assertEquals(1, HLogMetrics.getSyncTime().count);
   
       // Make sure we still hold lock
       assertTrue(region.isRowLocked(lockedRow));
@@ -702,8 +707,8 @@ public class TestHRegion extends HBaseTe
     this.region = initHRegion(b, getName(), conf, cf);
 
     try{
-      HLog.getSyncTime(); // clear counter from prior tests
-      assertEquals(0, HLog.getSyncTime().count);
+      HLogMetrics.getSyncTime(); // clear counter from prior tests
+      assertEquals(0, HLogMetrics.getSyncTime().count);
 
       final Put[] puts = new Put[10];
       for (int i = 0; i < 10; i++) {
@@ -717,7 +722,7 @@ public class TestHRegion extends HBaseTe
         assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i]
             .getOperationStatusCode());
       }
-      assertEquals(0, HLog.getSyncTime().count);
+      assertEquals(0, HLogMetrics.getSyncTime().count);
 
 
     } finally {

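Two more relocations surface in this file. Writers for recovered-edits files now come from the factory, and the WAL sync counters move from HLog to HLogMetrics. Sketch of the shapes, as used in the hunks above:

    // Writer creation, e.g. for a recovered.edits file:
    HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);

    // Sync metrics; per the test's own comment, reading the counter also clears it:
    HLogMetrics.getSyncTime();                        // reset from prior tests
    assertEquals(0, HLogMetrics.getSyncTime().count);
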
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Tue Oct  2 19:29:19 2012
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.coprocess
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.zookeeper.KeeperException;
@@ -73,9 +74,9 @@ public class TestSplitTransaction {
     this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
     TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
     this.fs.delete(this.testdir, true);
-    this.wal = new HLog(fs, new Path(this.testdir, "logs"),
-      new Path(this.testdir, "archive"),
+    this.wal = HLogFactory.createHLog(fs, this.testdir, "logs",
       TEST_UTIL.getConfiguration());
+    
     this.parent = createRegion(this.testdir, this.wal);
     RegionCoprocessorHost host = new RegionCoprocessorHost(this.parent, null, TEST_UTIL.getConfiguration());
     this.parent.setCoprocessorHost(host);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Oct  2 19:29:19 2012
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -134,8 +135,9 @@ public class TestStore extends TestCase 
       HColumnDescriptor hcd) throws IOException {
     //Setting up a Store
     Path basedir = new Path(DIR+methodName);
-    Path logdir = new Path(DIR+methodName+"/logs");
-    Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
+    String logName = "logs";
+    Path logdir = new Path(basedir, logName);
+
     FileSystem fs = FileSystem.get(conf);
 
     fs.delete(logdir, true);
@@ -143,7 +145,7 @@ public class TestStore extends TestCase 
     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
+    HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
 
     store = new HStore(basedir, region, hcd, fs, conf);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java Tue Oct  2 19:29:19 2012
@@ -24,6 +24,7 @@ import java.util.LinkedList;
 import java.util.Queue;
 
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 
 public class FaultySequenceFileLogReader extends SequenceFileLogReader {
 
@@ -45,7 +46,7 @@ public class FaultySequenceFileLogReader
 
     if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
       while (b == true) {
-        HLogKey key = HLog.newKey(conf);
+        HLogKey key = HLogUtil.newKey(conf);
         WALEdit val = new WALEdit();
         HLog.Entry e = new HLog.Entry(key, val);
         b = this.reader.next(e.getKey(), e.getEdit());

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java Tue Oct  2 19:29:19 2012
@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
 /**
@@ -178,8 +180,7 @@ public final class HLogPerformanceEvalua
       // Initialize Table Descriptor
       HTableDescriptor htd = createHTableDescriptor(numFamilies);
       final long whenToRoll = roll;
-      HLog hlog = new HLog(fs, new Path(rootRegionDir, "wals"),
-          new Path(rootRegionDir, "old.wals"), getConf()) {
+      HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {
         int appends = 0;
         protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
             HTableDescriptor htd)
@@ -204,7 +205,7 @@ public final class HLogPerformanceEvalua
           region = null;
         }
         if (verify) {
-          Path dir = hlog.getDir();
+          Path dir = ((FSHLog) hlog).getDir();
           long editCount = 0;
           for (FileStatus fss: fs.listStatus(dir)) {
             editCount += verify(fss.getPath(), verbose);
@@ -244,7 +245,8 @@ public final class HLogPerformanceEvalua
    * @throws IOException
    */
   private long verify(final Path wal, final boolean verbose) throws IOException {
-    HLog.Reader reader = HLog.getReader(wal.getFileSystem(getConf()), wal, getConf());
+    HLog.Reader reader = HLogFactory.createReader(wal.getFileSystem(getConf()), 
+        wal, getConf());
     long previousSeqid = -1;
     long count = 0;
     try {

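The perf-evaluation changes show the other half of the refactor: HLog is being narrowed toward an interface, with FSHLog as the filesystem-backed implementation. Code that needs implementation hooks now subclasses FSHLog directly, and implementation-only accessors require an explicit cast. Sketch, with the constructor shape taken from the hunk above:

    // Anonymous subclass to intercept writes:
    HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) {
      // override doWrite(...) to count appends and trigger rolls, as above
    };

    // getDir() is no longer part of the HLog type itself:
    Path dir = ((FSHLog) hlog).getDir();
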
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java?rev=1393126&r1=1393125&r2=1393126&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java Tue Oct  2 19:29:19 2012
@@ -36,10 +36,10 @@ public class HLogUtilsForTests {
    * @return
    */
   public static int getNumLogFiles(HLog log) {
-    return log.getNumLogFiles();
+    return ((FSHLog) log).getNumLogFiles();
   }
 
   public static int getNumEntries(HLog log) {
-    return log.getNumEntries();
+    return ((FSHLog) log).getNumEntries();
   }
 }
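Same cast pattern in the test utility: getNumLogFiles() and getNumEntries() are now treated as FSHLog implementation details rather than HLog API, hence:

    public static int getNumLogFiles(HLog log) {
      return ((FSHLog) log).getNumLogFiles();  // per-implementation accessor
    }

The design point is presumably that a non-filesystem WAL implementation would not necessarily expose these counters.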


