hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject [11/12] hbase git commit: HBASE-10378 Refactor write-ahead-log implementation -- ADDEDNUM
Date Tue, 18 Nov 2014 20:13:05 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index 8b26eea..215ff16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
 
@@ -444,14 +445,32 @@ public abstract class BaseRegionObserver implements RegionObserver {
       final InternalScanner s) throws IOException {
   }
 
+  /**
+   * Implementers should override this version of the method and leave the deprecated one as-is.
+   */
+  @Override
+  public void preWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> env,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
+  }
+
   @Override
   public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env, HRegionInfo info,
       HLogKey logKey, WALEdit logEdit) throws IOException {
+    preWALRestore(env, info, (WALKey)logKey, logEdit);
+  }
+
+  /**
+   * Implementers should override this version of the method and leave the deprecated one as-is.
+   */
+  @Override
+  public void postWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> env,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
   }
 
   @Override
   public void postWALRestore(ObserverContext<RegionCoprocessorEnvironment> env,
       HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException {
+    postWALRestore(env, info, (WALKey)logKey, logEdit);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
index 0836da9..cfddcd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
 /**
@@ -42,13 +43,31 @@ public class BaseWALObserver implements WALObserver {
   @Override
   public void stop(CoprocessorEnvironment e) throws IOException { }
 
+  /**
+   * Implementers should override this method and leave the deprecated version as-is.
+   */
+  @Override
+  public boolean preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
+    return false;
+  }
+
   @Override
   public boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
       HLogKey logKey, WALEdit logEdit) throws IOException {
-    return false;
+    return preWALWrite(ctx, info, (WALKey)logKey, logEdit);
   }
 
+  /**
+   * Implementers should override this method and leave the deprecated version as-is.
+   */
+  @Override
+  public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { }
+
   @Override
   public void postWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
-      HLogKey logKey, WALEdit logEdit) throws IOException { }
+      HLogKey logKey, WALEdit logEdit) throws IOException {
+    postWALWrite(ctx, info, (WALKey)logKey, logEdit);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 17fcabc..f819fbc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -557,4 +558,77 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
           "coprocessor set.", e);
     }
   }
+
+  /**
+   * Used to gracefully handle fallback to deprecated methods when we
+   * evolve coprocessor APIs.
+   *
+   * When a particular Coprocessor API is updated to change methods, hosts can support fallback
+   * to the deprecated API by using this method to determine if an instance implements the new API.
+   * In the event that said support is partial, then in the face of a runtime issue that prevents
+   * proper operation {@link #legacyWarning(Class, String)} should be used to let operators know.
+   *
+   * For examples of this in action, see the implementation of
+   * <ul>
+   *   <li>{@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}
+   *   <li>{@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost}
+   * </ul>
+   *
+   * @param clazz Coprocessor you wish to evaluate
+   * @param methodName the name of the non-deprecated method version
+   * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are
+   *     declared.
+   */
+  @InterfaceAudience.Private
+  protected static boolean useLegacyMethod(final Class<? extends Coprocessor> clazz,
+      final String methodName, final Class<?>... parameterTypes) {
+    boolean useLegacy;
+    // Use reflection to see if they implement the non-deprecated version
+    try {
+      clazz.getDeclaredMethod(methodName, parameterTypes);
+      LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " +
+          "signature. Skipping legacy support for invocations in '" + clazz +"'.");
+      useLegacy = false;
+    } catch (NoSuchMethodException exception) {
+      useLegacy = true;
+    } catch (SecurityException exception) {
+      LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz +
+          "' requires legacy support; assuming it does. If you get later errors about legacy " +
+          "coprocessor use, consider updating your security policy to allow access to the package" +
+          " and declared members of your implementation.");
+      LOG.debug("Details of Security Manager rejection.", exception);
+      useLegacy = true;
+    }
+    return useLegacy;
+  }
+
+  /**
+   * Used to limit legacy handling to once per Coprocessor class per classloader.
+   */
+  private static final Set<Class<? extends Coprocessor>> legacyWarning =
+      new ConcurrentSkipListSet<Class<? extends Coprocessor>>(
+          new Comparator<Class<? extends Coprocessor>>() {
+            @Override
+            public int compare(Class<? extends Coprocessor> c1, Class<? extends Coprocessor> c2) {
+              if (c1.equals(c2)) {
+                return 0;
+              }
+              return c1.getName().compareTo(c2.getName());
+            }
+          });
+
+  /**
+   * limits the amount of logging to once per coprocessor class.
+   * Used in concert with {@link #useLegacyMethod(Class, String, Class[])} when a runtime issue
+   * prevents properly supporting the legacy version of a coprocessor API.
+   * Since coprocessors can be in tight loops this serves to limit the amount of log spam we create.
+   */
+  @InterfaceAudience.Private
+  protected void legacyWarning(final Class<? extends Coprocessor> clazz, final String message) {
+    if(legacyWarning.add(clazz)) {
+      LOG.error("You have a legacy coprocessor loaded and there are events we can't map to the " +
+          " deprecated API. Your coprocessor will not see these events.  Please update '" + clazz +
+          "'. Details of the problem: " + message);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index e526d63..ee43cba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
 
@@ -67,6 +68,9 @@ import com.google.common.collect.ImmutableList;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
+// TODO as method signatures need to break, update to
+// ObserverContext<? extends RegionCoprocessorEnvironment>
+// so we can use additional environment state that isn't exposed to coprocessors.
 public interface RegionObserver extends Coprocessor {
 
   /** Mutation type for postMutationBeforeWAL hook */
@@ -1109,26 +1113,62 @@ public interface RegionObserver extends Coprocessor {
   /**
    * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
    * replayed for this region.
+   */
+  void preWALRestore(final ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+  /**
+   * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+   * replayed for this region.
    *
-   * @param ctx
-   * @param info
-   * @param logKey
-   * @param logEdit
-   * @throws IOException
+   * This method is left in place to maintain binary compatibility with older
+   * {@link RegionObserver}s. If an implementation directly overrides
+   * {@link #preWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+   * won't be called at all, barring problems with the Security Manager. To work correctly
+   * in the presence of a strict Security Manager, or in the case of an implementation that
+   * relies on a parent class to implement preWALRestore, you should implement this method
+   * as a call to the non-deprecated version.
+   *
+   * Users of this method will see all edits that can be treated as HLogKey. If there are
+   * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+   * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+   * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+   * classloader.
+   *
+   * @deprecated use {@link #preWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)}
    */
+  @Deprecated
   void preWALRestore(final ObserverContext<RegionCoprocessorEnvironment> ctx,
       HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
 
   /**
    * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
    * replayed for this region.
+   */
+  void postWALRestore(final ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+  /**
+   * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+   * replayed for this region.
    *
-   * @param ctx
-   * @param info
-   * @param logKey
-   * @param logEdit
-   * @throws IOException
+   * This method is left in place to maintain binary compatibility with older
+   * {@link RegionObserver}s. If an implementation directly overrides
+   * {@link #postWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+   * won't be called at all, barring problems with the Security Manager. To work correctly
+   * in the presence of a strict Security Manager, or in the case of an implementation that
+   * relies on a parent class to implement preWALRestore, you should implement this method
+   * as a call to the non-deprecated version.
+   *
+   * Users of this method will see all edits that can be treated as HLogKey. If there are
+   * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+   * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+   * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+   * classloader.
+   *
+   * @deprecated use {@link #postWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)}
    */
+  @Deprecated
   void postWALRestore(final ObserverContext<RegionCoprocessorEnvironment> ctx,
       HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
index d16eed8..a4ce5f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
 public interface WALCoprocessorEnvironment extends CoprocessorEnvironment {
-  /** @return reference to the region server services */
-  HLog getWAL();
+  /** @return reference to the region server's WAL */
+  WAL getWAL();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
index 49d84ed..bba83cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
 import java.io.IOException;
@@ -40,7 +41,7 @@ import java.io.IOException;
  * hooks for adding logic for WALEdits in the region context during reconstruction,
  *
  * Defines coprocessor hooks for interacting with operations on the
- * {@link org.apache.hadoop.hbase.regionserver.wal.HLog}.
+ * {@link org.apache.hadoop.hbase.wal.WAL}.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
@@ -50,27 +51,65 @@ public interface WALObserver extends Coprocessor {
    * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
   * is written to WAL.
    *
-   * @param ctx
-   * @param info
-   * @param logKey
-   * @param logEdit
    * @return true if default behavior should be bypassed, false otherwise
-   * @throws IOException
    */
   // TODO: return value is not used
+  boolean preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+  /**
+   * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+   * is written to WAL.
+   *
+   * This method is left in place to maintain binary compatibility with older
+   * {@link WALObserver}s. If an implementation directly overrides
+   * {@link #preWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+   * won't be called at all, barring problems with the Security Manager. To work correctly
+   * in the presence of a strict Security Manager, or in the case of an implementation that
+   * relies on a parent class to implement preWALWrite, you should implement this method
+   * as a call to the non-deprecated version.
+   *
+   * Users of this method will see all edits that can be treated as HLogKey. If there are
+   * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+   * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+   * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+   * classloader.
+   *
+   * @return true if default behavior should be bypassed, false otherwise
+   * @deprecated use {@link #preWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)}
+   */
+  @Deprecated
   boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx,
       HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
 
   /**
    * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
   * is written to WAL.
+   */
+  void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+  /**
+   * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+   * is written to WAL.
+   *
+   * This method is left in place to maintain binary compatibility with older
+   * {@link WALObserver}s. If an implementation directly overrides
+   * {@link #postWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+   * won't be called at all, barring problems with the Security Manager. To work correctly
+   * in the presence of a strict Security Manager, or in the case of an implementation that
+   * relies on a parent class to implement preWALWrite, you should implement this method
+   * as a call to the non-deprecated version.
+   *
+   * Users of this method will see all edits that can be treated as HLogKey. If there are
+   * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+   * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+   * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+   * classloader.
    *
-   * @param ctx
-   * @param info
-   * @param logKey
-   * @param logEdit
-   * @throws IOException
+   * @deprecated use {@link #postWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)}
    */
+  @Deprecated
   void postWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx,
       HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index f8cf7b3..fb58360 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -54,8 +54,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 /**
  * An encapsulation for the FileSystem object that hbase uses to access
  * data. This class allows the flexibility of using  
- * separate filesystem objects for reading and writing hfiles and hlogs.
- * In future, if we want to make hlogs be in a different filesystem,
+ * separate filesystem objects for reading and writing hfiles and wals.
+ * In future, if we want to make wals be in a different filesystem,
  * this is the place to make it happen.
  */
 public class HFileSystem extends FilterFileSystem {
@@ -322,7 +322,7 @@ public class HFileSystem extends FilterFileSystem {
   }
 
   /**
-   * We're putting at lowest priority the hlog files blocks that are on the same datanode
+   * We're putting at lowest priority the wal files blocks that are on the same datanode
    * as the original regionserver which created these files. This because we fear that the
    * datanode is actually dead, so if we use it it will timeout.
    */
@@ -330,17 +330,17 @@ public class HFileSystem extends FilterFileSystem {
     public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
         throws IOException {
 
-      ServerName sn = HLogUtil.getServerNameFromHLogDirectoryName(conf, src);
+      ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
       if (sn == null) {
-        // It's not an HLOG
+        // It's not a WAL
         return;
       }
 
-      // Ok, so it's an HLog
+      // Ok, so it's a WAL
       String hostName = sn.getHostname();
       if (LOG.isTraceEnabled()) {
         LOG.trace(src +
-            " is an HLog file, so reordering blocks, last hostname will be:" + hostName);
+            " is an WAL file, so reordering blocks, last hostname will be:" + hostName);
       }
 
       // Just check for all blocks

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
deleted file mode 100644
index e62eb14..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * HLogLink describes a link to a WAL.
- *
- * An hlog can be in /hbase/.logs/<server>/<hlog>
- * or it can be in /hbase/.oldlogs/<hlog>
- *
- * The link checks first in the original path,
- * if it is not present it fallbacks to the archived path.
- */
-@InterfaceAudience.Private
-public class HLogLink extends FileLink {
-  /**
-   * @param conf {@link Configuration} from which to extract specific archive locations
-   * @param serverName Region Server owner of the log
-   * @param logName WAL file name
-   * @throws IOException on unexpected error.
-   */
-  public HLogLink(final Configuration conf,
-      final String serverName, final String logName) throws IOException {
-    this(FSUtils.getRootDir(conf), serverName, logName);
-  }
-
-  /**
-   * @param rootDir Path to the root directory where hbase files are stored
-   * @param serverName Region Server owner of the log
-   * @param logName WAL file name
-   */
-  public HLogLink(final Path rootDir, final String serverName, final String logName) {
-    final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    final Path logDir = new Path(new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), serverName);
-    setLocations(new Path(logDir, logName), new Path(oldLogDir, logName));
-  }
-
-  /**
-   * @param originPath Path to the wal in the log directory
-   * @param archivePath Path to the wal in the archived log directory
-   */
-  public HLogLink(final Path originPath, final Path archivePath) {
-    setLocations(originPath, archivePath);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 2c0efc8..e35071e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -186,12 +186,12 @@ public class HFileOutputFormat2
           rollWriters();
         }
 
-        // create a new HLog writer, if necessary
+        // create a new WAL writer, if necessary
         if (wl == null || wl.writer == null) {
           wl = getNewWriter(family, conf);
         }
 
-        // we now have the proper HLog writer. full steam ahead
+        // we now have the proper WAL writer. full steam ahead
         kv.updateLatestStamp(this.now);
         wl.writer.append(kv);
         wl.written += length;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
index 4f604f8..4ed0672 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
@@ -17,26 +17,15 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.EOFException;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -44,227 +33,51 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 /**
- * Simple {@link InputFormat} for {@link HLog} files.
+ * Simple {@link InputFormat} for {@link WAL} files.
+ * @deprecated use {@link WALInputFormat}
  */
+@Deprecated
 @InterfaceAudience.Public
 public class HLogInputFormat extends InputFormat<HLogKey, WALEdit> {
   private static final Log LOG = LogFactory.getLog(HLogInputFormat.class);
-
   public static final String START_TIME_KEY = "hlog.start.time";
   public static final String END_TIME_KEY = "hlog.end.time";
 
-  /**
-   * {@link InputSplit} for {@link HLog} files. Each split represent
-   * exactly one log file.
-   */
-  static class HLogSplit extends InputSplit implements Writable {
-    private String logFileName;
-    private long fileSize;
-    private long startTime;
-    private long endTime;
-
-    /** for serialization */
-    public HLogSplit() {}
-
-    /**
-     * Represent an HLogSplit, i.e. a single HLog file.
-     * Start- and EndTime are managed by the split, so that HLog files can be
-     * filtered before WALEdits are passed to the mapper(s).
-     * @param logFileName
-     * @param fileSize
-     * @param startTime
-     * @param endTime
-     */
-    public HLogSplit(String logFileName, long fileSize, long startTime, long endTime) {
-      this.logFileName = logFileName;
-      this.fileSize = fileSize;
-      this.startTime = startTime;
-      this.endTime = endTime;
-    }
-
-    @Override
-    public long getLength() throws IOException, InterruptedException {
-      return fileSize;
-    }
-
-    @Override
-    public String[] getLocations() throws IOException, InterruptedException {
-      // TODO: Find the data node with the most blocks for this HLog?
-      return new String[] {};
-    }
-
-    public String getLogFileName() {
-      return logFileName;
-    }
-
-    public long getStartTime() {
-      return startTime;
-    }
-
-    public long getEndTime() {
-      return endTime;
-    }
-
-    @Override
-    public void readFields(DataInput in) throws IOException {
-      logFileName = in.readUTF();
-      fileSize = in.readLong();
-      startTime = in.readLong();
-      endTime = in.readLong();
-    }
-
-    @Override
-    public void write(DataOutput out) throws IOException {
-      out.writeUTF(logFileName);
-      out.writeLong(fileSize);
-      out.writeLong(startTime);
-      out.writeLong(endTime);
-    }
-
-    @Override
-    public String toString() {
-      return logFileName + " (" + startTime + ":" + endTime + ") length:" + fileSize;
-    }
-  }
+  // Delegate to WALInputFormat for implementation.
+  private final WALInputFormat delegate = new WALInputFormat();
 
   /**
-   * {@link RecordReader} for an {@link HLog} file.
+   * {@link RecordReader} that pulls out the legacy HLogKey format directly.
    */
-  static class HLogRecordReader extends RecordReader<HLogKey, WALEdit> {
-    private HLog.Reader reader = null;
-    private HLog.Entry currentEntry = new HLog.Entry();
-    private long startTime;
-    private long endTime;
-
-    @Override
-    public void initialize(InputSplit split, TaskAttemptContext context)
-        throws IOException, InterruptedException {
-      HLogSplit hsplit = (HLogSplit)split;
-      Path logFile = new Path(hsplit.getLogFileName());
-      Configuration conf = context.getConfiguration();
-      LOG.info("Opening reader for "+split);
-      try {
-        this.reader = HLogFactory.createReader(logFile.getFileSystem(conf), 
-            logFile, conf);
-      } catch (EOFException x) {
-        LOG.info("Ignoring corrupted HLog file: " + logFile
-            + " (This is normal when a RegionServer crashed.)");
-      }
-      this.startTime = hsplit.getStartTime();
-      this.endTime = hsplit.getEndTime();
-    }
-
-    @Override
-    public boolean nextKeyValue() throws IOException, InterruptedException {
-      if (reader == null) return false;
-
-      HLog.Entry temp;
-      long i = -1;
-      do {
-        // skip older entries
-        try {
-          temp = reader.next(currentEntry);
-          i++;
-        } catch (EOFException x) {
-          LOG.info("Corrupted entry detected. Ignoring the rest of the file."
-              + " (This is normal when a RegionServer crashed.)");
-          return false;
-        }
-      }
-      while(temp != null && temp.getKey().getWriteTime() < startTime);
-
-      if (temp == null) {
-        if (i > 0) LOG.info("Skipped " + i + " entries.");
-        LOG.info("Reached end of file.");
-        return false;
-      } else if (i > 0) {
-        LOG.info("Skipped " + i + " entries, until ts: " + temp.getKey().getWriteTime() + ".");
-      }
-      boolean res = temp.getKey().getWriteTime() <= endTime;
-      if (!res) {
-        LOG.info("Reached ts: " + temp.getKey().getWriteTime() + " ignoring the rest of the file.");
-      }
-      return res;
-    }
-
+  static class HLogKeyRecordReader extends WALInputFormat.WALRecordReader<HLogKey> {
     @Override
     public HLogKey getCurrentKey() throws IOException, InterruptedException {
-      return currentEntry.getKey();
-    }
-
-    @Override
-    public WALEdit getCurrentValue() throws IOException, InterruptedException {
-      return currentEntry.getEdit();
-    }
-
-    @Override
-    public float getProgress() throws IOException, InterruptedException {
-      // N/A depends on total number of entries, which is unknown
-      return 0;
-    }
-
-    @Override
-    public void close() throws IOException {
-      LOG.info("Closing reader");
-      if (reader != null) this.reader.close();
+      if (!(currentEntry.getKey() instanceof HLogKey)) {
+        final IllegalStateException exception = new IllegalStateException(
+            "HLogInputFormat only works when given entries that have HLogKey for keys. This" +
+            " one had '" + currentEntry.getKey().getClass() + "'");
+        LOG.error("The deprecated HLogInputFormat has to work with the deprecated HLogKey class, " +
+            "but HBase internals read the wal entry using some other class." +
+            " This is a bug; please file an issue or email the developer mailing list. It is " +
+            "likely that you would not have this problem if you updated to use WALInputFormat. " +
+            "You will need the following exception details when seeking help from the HBase " +
+            "community.",
+            exception);
+        throw exception;
+      }
+      return (HLogKey)currentEntry.getKey();
     }
   }
 
   @Override
   public List<InputSplit> getSplits(JobContext context) throws IOException,
       InterruptedException {
-    Configuration conf = context.getConfiguration();
-    Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir"));
-
-    long startTime = conf.getLong(START_TIME_KEY, Long.MIN_VALUE);
-    long endTime = conf.getLong(END_TIME_KEY, Long.MAX_VALUE);
-
-    FileSystem fs = inputDir.getFileSystem(conf);
-    List<FileStatus> files = getFiles(fs, inputDir, startTime, endTime);
-
-    List<InputSplit> splits = new ArrayList<InputSplit>(files.size());
-    for (FileStatus file : files) {
-      splits.add(new HLogSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
-    }
-    return splits;
-  }
-
-  private List<FileStatus> getFiles(FileSystem fs, Path dir, long startTime, long endTime)
-      throws IOException {
-    List<FileStatus> result = new ArrayList<FileStatus>();
-    LOG.debug("Scanning " + dir.toString() + " for HLog files");
-
-    FileStatus[] files = fs.listStatus(dir);
-    if (files == null) return Collections.emptyList();
-    for (FileStatus file : files) {
-      if (file.isDirectory()) {
-        // recurse into sub directories
-        result.addAll(getFiles(fs, file.getPath(), startTime, endTime));
-      } else {
-        String name = file.getPath().toString();
-        int idx = name.lastIndexOf('.');
-        if (idx > 0) {
-          try {
-            long fileStartTime = Long.parseLong(name.substring(idx+1));
-            if (fileStartTime <= endTime) {
-              LOG.info("Found: " + name);
-              result.add(file);
-            }
-          } catch (NumberFormatException x) {
-            idx = 0;
-          }
-        }
-        if (idx == 0) {
-          LOG.warn("File " + name + " does not appear to be an HLog file. Skipping...");
-        }
-      }
-    }
-    return result;
+    return delegate.getSplits(context, START_TIME_KEY, END_TIME_KEY);
   }
 
   @Override
   public RecordReader<HLogKey, WALEdit> createRecordReader(InputSplit split,
       TaskAttemptContext context) throws IOException, InterruptedException {
-    return new HLogRecordReader();
+    return new HLogKeyRecordReader();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index c1d8373..62a9626 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
  * </p>
  *
  * <p>
- * Write-ahead logging (HLog) for Puts can be disabled by setting
+ * Write-ahead logging (WAL) for Puts can be disabled by setting
  * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}.
  * Note that disabling write-ahead logging is only appropriate for jobs where
  * loss of data due to region server failure can be tolerated (for example,
@@ -61,7 +61,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable, Mutation> {
-  /** Set this to {@link #WAL_OFF} to turn off write-ahead logging (HLog) */
+  /** Set this to {@link #WAL_OFF} to turn off write-ahead logging (WAL) */
   public static final String WAL_PROPERTY = "hbase.mapreduce.multitableoutputformat.wal";
   /** Property value to use write-ahead logging */
   public static final boolean WAL_ON = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index 4d451a4..79d5261 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
 /**
  * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job
  * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits,
- * hlogs, etc) directly to provide maximum performance. The snapshot is not required to be
+ * wals, etc) directly to provide maximum performance. The snapshot is not required to be
  * restored to the live cluster or cloned. This also allows to run the mapreduce job from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
  * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index cf9dc56..26fab5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -23,6 +23,8 @@ import java.text.SimpleDateFormat;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -39,7 +41,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Job;
@@ -63,11 +65,21 @@ import org.apache.hadoop.util.ToolRunner;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class WALPlayer extends Configured implements Tool {
+  final static Log LOG = LogFactory.getLog(WALPlayer.class);
   final static String NAME = "WALPlayer";
-  final static String BULK_OUTPUT_CONF_KEY = "hlog.bulk.output";
-  final static String HLOG_INPUT_KEY = "hlog.input.dir";
-  final static String TABLES_KEY = "hlog.input.tables";
-  final static String TABLE_MAP_KEY = "hlog.input.tablesmap";
+  final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output";
+  final static String TABLES_KEY = "wal.input.tables";
+  final static String TABLE_MAP_KEY = "wal.input.tablesmap";
+
+  // This relies on Hadoop Configuration to handle warning about deprecated configs and
+  // to set the correct non-deprecated configs when an old one shows up.
+  static {
+    Configuration.addDeprecation("hlog.bulk.output", BULK_OUTPUT_CONF_KEY);
+    Configuration.addDeprecation("hlog.input.tables", TABLES_KEY);
+    Configuration.addDeprecation("hlog.input.tablesmap", TABLE_MAP_KEY);
+    Configuration.addDeprecation(HLogInputFormat.START_TIME_KEY, WALInputFormat.START_TIME_KEY);
+    Configuration.addDeprecation(HLogInputFormat.END_TIME_KEY, WALInputFormat.END_TIME_KEY);
+  }
 
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
@@ -75,12 +87,12 @@ public class WALPlayer extends Configured implements Tool {
    * A mapper that just writes out KeyValues.
    * This one can be used together with {@link KeyValueSortReducer}
    */
-  static class HLogKeyValueMapper
-  extends Mapper<HLogKey, WALEdit, ImmutableBytesWritable, KeyValue> {
+  static class WALKeyValueMapper
+  extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, KeyValue> {
     private byte[] table;
 
     @Override
-    public void map(HLogKey key, WALEdit value,
+    public void map(WALKey key, WALEdit value,
       Context context)
     throws IOException {
       try {
@@ -102,7 +114,7 @@ public class WALPlayer extends Configured implements Tool {
       // only a single table is supported when HFiles are generated with HFileOutputFormat
       String tables[] = context.getConfiguration().getStrings(TABLES_KEY);
       if (tables == null || tables.length != 1) {
-        // this can only happen when HLogMapper is used directly by a class other than WALPlayer
+        // this can only happen when WALMapper is used directly by a class other than WALPlayer
         throw new IOException("Exactly one table must be specified for bulk HFile case.");
       }
       table = Bytes.toBytes(tables[0]);
@@ -113,13 +125,13 @@ public class WALPlayer extends Configured implements Tool {
    * A mapper that writes out {@link Mutation} to be directly applied to
    * a running HBase instance.
    */
-  static class HLogMapper
-  extends Mapper<HLogKey, WALEdit, ImmutableBytesWritable, Mutation> {
+  static class WALMapper
+  extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
     private Map<TableName, TableName> tables =
         new TreeMap<TableName, TableName>();
 
     @Override
-    public void map(HLogKey key, WALEdit value,
+    public void map(WALKey key, WALEdit value,
       Context context)
     throws IOException {
       try {
@@ -132,7 +144,7 @@ public class WALPlayer extends Configured implements Tool {
           Delete del = null;
           Cell lastCell = null;
           for (Cell cell : value.getCells()) {
-            // filtering HLog meta entries
+            // filtering WAL meta entries
             if (WALEdit.isMetaEditFamily(cell.getFamily())) continue;
 
             // A WALEdit may contain multiple operations (HBASE-3584) and/or
@@ -172,7 +184,7 @@ public class WALPlayer extends Configured implements Tool {
       String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
       String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
       if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
-        // this can only happen when HLogMapper is used directly by a class other than WALPlayer
+        // this can only happen when WALMapper is used directly by a class other than WALPlayer
         throw new IOException("No tables or incorrect table mapping specified.");
       }
       int i = 0;
@@ -192,7 +204,7 @@ public class WALPlayer extends Configured implements Tool {
 
   void setupTime(Configuration conf, String option) throws IOException {
     String val = conf.get(option);
-    if (val == null) return;
+    if (null == val) return;
     long ms;
     try {
       // first try to parse in user friendly form
@@ -239,7 +251,7 @@ public class WALPlayer extends Configured implements Tool {
     Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + inputDir));
     job.setJarByClass(WALPlayer.class);
     FileInputFormat.setInputPaths(job, inputDir);
-    job.setInputFormatClass(HLogInputFormat.class);
+    job.setInputFormatClass(WALInputFormat.class);
     job.setMapOutputKeyClass(ImmutableBytesWritable.class);
     String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
     if (hfileOutPath != null) {
@@ -248,7 +260,7 @@ public class WALPlayer extends Configured implements Tool {
         throw new IOException("Exactly one table must be specified for the bulk export option");
       }
       HTable table = new HTable(conf, TableName.valueOf(tables[0]));
-      job.setMapperClass(HLogKeyValueMapper.class);
+      job.setMapperClass(WALKeyValueMapper.class);
       job.setReducerClass(KeyValueSortReducer.class);
       Path outputDir = new Path(hfileOutPath);
       FileOutputFormat.setOutputPath(job, outputDir);
@@ -258,7 +270,7 @@ public class WALPlayer extends Configured implements Tool {
           com.google.common.base.Preconditions.class);
     } else {
       // output to live cluster
-      job.setMapperClass(HLogMapper.class);
+      job.setMapperClass(WALMapper.class);
       job.setOutputFormatClass(MultiTableOutputFormat.class);
       TableMapReduceUtil.addDependencyJars(job);
       TableMapReduceUtil.initCredentials(job);
@@ -288,8 +300,8 @@ public class WALPlayer extends Configured implements Tool {
     System.err.println("  -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
     System.err.println("  (Only one table can be specified, and no mapping is allowed!)");
     System.err.println("Other options: (specify time range to WAL edit to consider)");
-    System.err.println("  -D" + HLogInputFormat.START_TIME_KEY + "=[date|ms]");
-    System.err.println("  -D" + HLogInputFormat.END_TIME_KEY + "=[date|ms]");
+    System.err.println("  -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
+    System.err.println("  -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
     System.err.println("   -D " + JOB_NAME_CONF_KEY
         + "=jobName - use the specified mapreduce job name for the wal player");
     System.err.println("For performance also consider the following options:\n"

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 7c7f0b6..d23f139 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -76,8 +76,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
@@ -463,17 +462,20 @@ public class AssignmentManager {
     }
     if (!failover) {
       // If we get here, we have a full cluster restart. It is a failover only
-      // if there are some HLogs are not split yet. For meta HLogs, they should have
+      // if some WALs are not split yet. For meta WALs, they should have
       // been split already, if any. We can walk through those queued dead servers,
-      // if they don't have any HLogs, this restart should be considered as a clean one
+      // if they don't have any WALs, this restart should be considered a clean one
       Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
       if (!queuedDeadServers.isEmpty()) {
         Configuration conf = server.getConfiguration();
         Path rootdir = FSUtils.getRootDir(conf);
         FileSystem fs = rootdir.getFileSystem(conf);
         for (ServerName serverName: queuedDeadServers) {
-          Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
-          Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
+          // In the case of a clean exit, the shutdown handler would have presplit any WALs and
+          // removed empty directories.
+          Path logDir = new Path(rootdir,
+              DefaultWALProvider.getWALDirectoryName(serverName.toString()));
+          Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
           if (fs.exists(logDir) || fs.exists(splitDir)) {
             LOG.debug("Found queued dead server " + serverName);
             failover = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 115cc35..9bf70e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -51,8 +51,8 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -94,14 +94,14 @@ public class MasterFileSystem {
   final static PathFilter META_FILTER = new PathFilter() {
     @Override
     public boolean accept(Path p) {
-      return HLogUtil.isMetaFile(p);
+      return DefaultWALProvider.isMetaFile(p);
     }
   };
 
   final static PathFilter NON_META_FILTER = new PathFilter() {
     @Override
     public boolean accept(Path p) {
-      return !HLogUtil.isMetaFile(p);
+      return !DefaultWALProvider.isMetaFile(p);
     }
   };
 
@@ -216,7 +216,7 @@ public class MasterFileSystem {
    */
   Set<ServerName> getFailedServersFromLogFolders() {
     boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
-      HLog.SPLIT_SKIP_ERRORS_DEFAULT);
+        WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
 
     Set<ServerName> serverNames = new HashSet<ServerName>();
     Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
@@ -239,13 +239,13 @@ public class MasterFileSystem {
           return serverNames;
         }
         for (FileStatus status : logFolders) {
-          String sn = status.getPath().getName();
-          // truncate splitting suffix if present (for ServerName parsing)
-          if (sn.endsWith(HLog.SPLITTING_EXT)) {
-            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
-          }
-          ServerName serverName = ServerName.parseServerName(sn);
-          if (!onlineServers.contains(serverName)) {
+          final ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(
+              status.getPath());
+          if (null == serverName) {
+            LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " +
+                "region server name; leaving in place. If you see later errors about missing " +
+                "write ahead logs they may be saved in this location.");
+          } else if (!onlineServers.contains(serverName)) {
             LOG.info("Log folder " + status.getPath() + " doesn't belong "
                 + "to a known region server, splitting");
             serverNames.add(serverName);
@@ -283,7 +283,7 @@ public class MasterFileSystem {
   }
 
   /**
-   * Specialized method to handle the splitting for meta HLog
+   * Specialized method to handle the splitting for meta WAL
    * @param serverName
    * @throws IOException
    */
@@ -294,7 +294,7 @@ public class MasterFileSystem {
   }
 
   /**
-   * Specialized method to handle the splitting for meta HLog
+   * Specialized method to handle the splitting for meta WAL
    * @param serverNames
    * @throws IOException
    */
@@ -302,6 +302,9 @@ public class MasterFileSystem {
     splitLog(serverNames, META_FILTER);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
+      "We only release this lock when we set it. Updates to code that uses it should verify use " +
+      "of the guard boolean.")
   private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
     List<Path> logDirs = new ArrayList<Path>();
     boolean needReleaseLock = false;
@@ -312,9 +315,10 @@ public class MasterFileSystem {
     }
     try {
       for (ServerName serverName : serverNames) {
-        Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
-        Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
-        // Rename the directory so a rogue RS doesn't create more HLogs
+        Path logDir = new Path(this.rootdir,
+            DefaultWALProvider.getWALDirectoryName(serverName.toString()));
+        Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
+        // Rename the directory so a rogue RS doesn't create more WALs
         if (fs.exists(logDir)) {
           if (!this.fs.rename(logDir, splitDir)) {
             throw new IOException("Failed fs.rename for log split: " + logDir);
@@ -367,9 +371,10 @@ public class MasterFileSystem {
   }
 
   /**
-   * This method is the base split method that splits HLog files matching a filter. Callers should
-   * pass the appropriate filter for meta and non-meta HLogs.
-   * @param serverNames
+   * This method is the base split method that splits WAL files matching a filter. Callers should
+   * pass the appropriate filter for meta and non-meta WALs.
+   * @param serverNames logs belonging to these servers will be split; this will rename the log
+   *                    directory out from under a soft-failed server
    * @param filter
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
index 34547ef..45dbeb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
@@ -31,7 +31,7 @@ public class MetricsMasterFileSystem {
   /**
    * Record a single instance of a split
    * @param time time that the split took
-   * @param size length of original HLogs that were split
+   * @param size length of original WALs that were split
    */
   public synchronized void addSplit(long time, long size) {
     source.updateSplitTime(time);
@@ -41,7 +41,7 @@ public class MetricsMasterFileSystem {
   /**
    * Record a single instance of a split
    * @param time time that the split took
-   * @param size length of original HLogs that were split
+   * @param size length of original WALs that were split
    */
   public synchronized void addMetaWALSplit(long time, long size) {
     source.updateMetaWALSplitTime(time);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 95d41ed..b96aaee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -448,7 +448,7 @@ public class RegionStates {
   }
 
   /**
-   * A dead server's hlogs have been split so that all the regions
+   * A dead server's wals have been split so that all the regions
    * used to be open on it can be safely assigned now. Mark them assignable.
    */
   public synchronized void logSplit(final ServerName serverName) {
@@ -688,7 +688,7 @@ public class RegionStates {
 
   /**
    * Checking if a region was assigned to a server which is not online now.
-   * If so, we should hold re-assign this region till SSH has split its hlogs.
+   * If so, we should hold re-assign this region till SSH has split its wals.
    * Once logs are split, the last assignment of this region will be reset,
    * which means a null last assignment server is ok for re-assigning.
    *

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 369362b..39d0a0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -591,7 +591,7 @@ public class ServerManager {
     this.processDeadServer(serverName, false);
   }
 
-  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
+  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
     // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
     // in-memory region states, region servers could be down. Meta table can and
     // should be re-assigned, log splitting can be done too. However, it is better to
@@ -601,14 +601,14 @@ public class ServerManager {
     // the handler threads and meta table could not be re-assigned in case
     // the corresponding server is down. So we queue them up here instead.
     if (!services.getAssignmentManager().isFailoverCleanupDone()) {
-      requeuedDeadServers.put(serverName, shouldSplitHlog);
+      requeuedDeadServers.put(serverName, shouldSplitWal);
       return;
     }
 
     this.deadservers.add(serverName);
     this.services.getExecutorService().submit(
       new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
-          shouldSplitHlog));
+          shouldSplitWal));
   }
 
   /**
@@ -947,7 +947,7 @@ public class ServerManager {
 
   /**
    * During startup, if we figure it is not a failover, i.e. there is
-   * no more HLog files to split, we won't try to recover these dead servers.
+   * no more WAL files to split, we won't try to recover these dead servers.
    * So we just remove them from the queue. Use caution in calling this.
    */
   void removeRequeuedDeadServers() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index bf28a44..6dd5cf1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -102,8 +102,7 @@ public class SplitLogManager {
   private Server server;
 
   private final Stoppable stopper;
-  private FileSystem fs;
-  private Configuration conf;
+  private final Configuration conf;
 
   public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
 
@@ -161,16 +160,34 @@ public class SplitLogManager {
   }
 
   private FileStatus[] getFileList(List<Path> logDirs, PathFilter filter) throws IOException {
+    return getFileList(conf, logDirs, filter);
+  }
+
+  /**
+   * Get a list of paths that need to be split given a set of server-specific directories and
+   * optionally a filter.
+   *
+   * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
+   * layout.
+   *
+   * Should be package-private, but is needed by
+   * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
+   *     Configuration, WALFactory)} for tests.
+   */
+  @VisibleForTesting
+  public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
+      final PathFilter filter)
+      throws IOException {
     List<FileStatus> fileStatus = new ArrayList<FileStatus>();
-    for (Path hLogDir : logDirs) {
-      this.fs = hLogDir.getFileSystem(conf);
-      if (!fs.exists(hLogDir)) {
-        LOG.warn(hLogDir + " doesn't exist. Nothing to do!");
+    for (Path logDir : logDirs) {
+      final FileSystem fs = logDir.getFileSystem(conf);
+      if (!fs.exists(logDir)) {
+        LOG.warn(logDir + " doesn't exist. Nothing to do!");
         continue;
       }
-      FileStatus[] logfiles = FSUtils.listStatus(fs, hLogDir, filter);
+      FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
       if (logfiles == null || logfiles.length == 0) {
-        LOG.info(hLogDir + " is empty dir, no logs to split");
+        LOG.info(logDir + " is empty dir, no logs to split");
       } else {
         Collections.addAll(fileStatus, logfiles);
       }
@@ -180,7 +197,7 @@ public class SplitLogManager {
   }
 
   /**
-   * @param logDir one region sever hlog dir path in .logs
+   * @param logDir one region server wal dir path in .logs
    * @throws IOException if there was an error while splitting any log file
    * @return cumulative size of the logfiles split
    * @throws IOException
@@ -206,7 +223,7 @@ public class SplitLogManager {
     Set<ServerName> serverNames = new HashSet<ServerName>();
     for (Path logDir : logDirs) {
       try {
-        ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logDir);
+        ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logDir);
         if (serverName != null) {
           serverNames.add(serverName);
         }
@@ -273,6 +290,7 @@ public class SplitLogManager {
     }
     for (Path logDir : logDirs) {
       status.setStatus("Cleaning up log directory...");
+      final FileSystem fs = logDir.getFileSystem(conf);
       try {
         if (fs.exists(logDir) && !fs.delete(logDir, false)) {
           LOG.warn("Unable to delete log src dir. Ignoring. " + logDir);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
index 6c8e428..f68bfa2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 
 /**
- * This Chore, every time it runs, will attempt to delete the HLogs in the old logs folder. The HLog
+ * This Chore, every time it runs, will attempt to delete the WALs in the old logs folder. The WAL
  * is only deleted if none of the cleaner delegates says otherwise.
  * @see BaseLogCleanerDelegate
  */
@@ -51,6 +51,6 @@ public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate> {
 
   @Override
   protected boolean validate(Path file) {
-    return HLogUtil.validateHLogFilename(file.getName());
+    return DefaultWALProvider.validateWALFilename(file.getName());
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
index 3a39fb4..9d68601 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
- * Log cleaner that uses the timestamp of the hlog to determine if it should
+ * Log cleaner that uses the timestamp of the wal to determine if it should
  * be deleted. By default they are allowed to live for 10 minutes.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
index 648c835..73208bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
@@ -67,7 +67,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
       boolean distributedLogReplay = 
         (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
       try {
-        if (this.shouldSplitHlog) {
+        if (this.shouldSplitWal) {
           LOG.info("Splitting hbase:meta logs for " + serverName);
           if (distributedLogReplay) {
             Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
@@ -95,7 +95,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
       }
 
       try {
-        if (this.shouldSplitHlog && distributedLogReplay) {
+        if (this.shouldSplitWal && distributedLogReplay) {
           if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
             regionAssignmentWaitTimeout)) {
             // Wait here is to avoid log replay hits current dead server and incur a RPC timeout

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index c443968..5b7b27b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -56,19 +56,19 @@ public class ServerShutdownHandler extends EventHandler {
   protected final ServerName serverName;
   protected final MasterServices services;
   protected final DeadServer deadServers;
-  protected final boolean shouldSplitHlog; // whether to split HLog or not
+  protected final boolean shouldSplitWal; // whether to split WAL or not
   protected final int regionAssignmentWaitTimeout;
 
   public ServerShutdownHandler(final Server server, final MasterServices services,
       final DeadServer deadServers, final ServerName serverName,
-      final boolean shouldSplitHlog) {
+      final boolean shouldSplitWal) {
     this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
-        shouldSplitHlog);
+        shouldSplitWal);
   }
 
   ServerShutdownHandler(final Server server, final MasterServices services,
       final DeadServer deadServers, final ServerName serverName, EventType type,
-      final boolean shouldSplitHlog) {
+      final boolean shouldSplitWal) {
     super(server, type);
     this.serverName = serverName;
     this.server = server;
@@ -77,7 +77,7 @@ public class ServerShutdownHandler extends EventHandler {
     if (!this.deadServers.isDeadServer(this.serverName)) {
       LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
     }
-    this.shouldSplitHlog = shouldSplitHlog;
+    this.shouldSplitWal = shouldSplitWal;
     this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
       HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
   }
@@ -133,7 +133,7 @@ public class ServerShutdownHandler extends EventHandler {
       AssignmentManager am = services.getAssignmentManager();
       ServerManager serverManager = services.getServerManager();
       if (isCarryingMeta() /* hbase:meta */ || !am.isFailoverCleanupDone()) {
-        serverManager.processDeadServer(serverName, this.shouldSplitHlog);
+        serverManager.processDeadServer(serverName, this.shouldSplitWal);
         return;
       }
 
@@ -180,7 +180,7 @@ public class ServerShutdownHandler extends EventHandler {
         (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
 
       try {
-        if (this.shouldSplitHlog) {
+        if (this.shouldSplitWal) {
           if (distributedLogReplay) {
             LOG.info("Mark regions in recovery for crashed server " + serverName +
               " before assignment; regions=" + hris);
@@ -275,13 +275,13 @@ public class ServerShutdownHandler extends EventHandler {
         throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
       } catch (IOException ioe) {
         LOG.info("Caught " + ioe + " during region assignment, will retry");
-        // Only do HLog splitting if shouldSplitHlog and in DLR mode
+        // Only do wal splitting if shouldSplitWal and in DLR mode
         serverManager.processDeadServer(serverName,
-          this.shouldSplitHlog && distributedLogReplay);
+          this.shouldSplitWal && distributedLogReplay);
         return;
       }
 
-      if (this.shouldSplitHlog && distributedLogReplay) {
+      if (this.shouldSplitWal && distributedLogReplay) {
         // wait for region assignment completes
         for (HRegionInfo hri : toAssignRegions) {
           try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
index d5e174d..a927db3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
@@ -46,11 +46,11 @@ public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
    * Conf key for the frequency to attempt to refresh the cache of hfiles currently used in
    * snapshots (ms)
    */
-  static final String HLOG_CACHE_REFRESH_PERIOD_CONF_KEY =
+  static final String WAL_CACHE_REFRESH_PERIOD_CONF_KEY =
       "hbase.master.hlogcleaner.plugins.snapshot.period";
 
   /** Refresh cache, by default, every 5 minutes */
-  private static final long DEFAULT_HLOG_CACHE_REFRESH_PERIOD = 300000;
+  private static final long DEFAULT_WAL_CACHE_REFRESH_PERIOD = 300000;
 
   private SnapshotFileCache cache;
 
@@ -77,14 +77,14 @@ public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
     super.setConf(conf);
     try {
       long cacheRefreshPeriod = conf.getLong(
-        HLOG_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_HLOG_CACHE_REFRESH_PERIOD);
+        WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
       final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
       Path rootDir = FSUtils.getRootDir(conf);
       cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
           "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
             public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                 throws IOException {
-              return SnapshotReferenceUtil.getHLogNames(fs, snapshotDir);
+              return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
             }
           });
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 0310733..19bfa8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -50,9 +50,8 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -382,13 +381,11 @@ public class NamespaceUpgrade implements Tool {
 
 
     ServerName fakeServer = ServerName.valueOf("nsupgrade", 96, 123);
-    String metaLogName = HLogUtil.getHLogDirectoryName(fakeServer.toString());
-    HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
-        metaLogName, conf, null,
-        fakeServer.toString());
+    final WALFactory walFactory = new WALFactory(conf, null, fakeServer.toString());
+    WAL metawal = walFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
     FSTableDescriptors fst = new FSTableDescriptors(conf);
     HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
-        fst.get(TableName.META_TABLE_NAME), metaHLog, conf);
+        fst.get(TableName.META_TABLE_NAME), metawal, conf);
     HRegion region = null;
     try {
       for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
@@ -405,7 +402,7 @@ public class NamespaceUpgrade implements Tool {
             new HRegion(
                 HRegionFileSystem.openRegionFromFileSystem(conf, fs, oldTablePath,
                     oldRegionInfo, false),
-                metaHLog,
+                metawal,
                 conf,
                 oldDesc,
                 null);
@@ -442,7 +439,7 @@ public class NamespaceUpgrade implements Tool {
       meta.flushcache();
       meta.waitForFlushesAndCompactions();
       meta.close();
-      metaHLog.closeAndDelete();
+      metawal.close();
       if(region != null) {
         region.close();
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b94e6738/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 2e5fc41..d6a120b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Pair;
@@ -50,14 +50,14 @@ import com.google.protobuf.ServiceException;
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {
   /**
-   * A helper to replicate a list of HLog entries using admin protocol.
+   * A helper to replicate a list of WAL entries using admin protocol.
    *
    * @param admin
    * @param entries
    * @throws java.io.IOException
    */
   public static void replicateWALEntry(final AdminService.BlockingInterface admin,
-      final HLog.Entry[] entries) throws IOException {
+      final Entry[] entries) throws IOException {
     Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
       buildReplicateWALEntryRequest(entries, null);
     PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond());
@@ -69,27 +69,27 @@ public class ReplicationProtbufUtil {
   }
 
   /**
-   * Create a new ReplicateWALEntryRequest from a list of HLog entries
+   * Create a new ReplicateWALEntryRequest from a list of WAL entries
    *
-   * @param entries the HLog entries to be replicated
+   * @param entries the WAL entries to be replicated
    * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
    * found.
    */
   public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final HLog.Entry[] entries) {
+      buildReplicateWALEntryRequest(final Entry[] entries) {
     return buildReplicateWALEntryRequest(entries, null);
   }
 
   /**
-   * Create a new ReplicateWALEntryRequest from a list of HLog entries
+   * Create a new ReplicateWALEntryRequest from a list of WAL entries
    *
-   * @param entries the HLog entries to be replicated
+   * @param entries the WAL entries to be replicated
    * @param encodedRegionName alternative region name to use if not null
    * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
    * found.
    */
   public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
-      buildReplicateWALEntryRequest(final HLog.Entry[] entries, byte[] encodedRegionName) {
+      buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName) {
     // Accumulate all the Cells seen in here.
     List<List<? extends Cell>> allCells = new ArrayList<List<? extends Cell>>(entries.length);
     int size = 0;
@@ -98,11 +98,11 @@ public class ReplicationProtbufUtil {
     AdminProtos.ReplicateWALEntryRequest.Builder builder =
       AdminProtos.ReplicateWALEntryRequest.newBuilder();
     HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder();
-    for (HLog.Entry entry: entries) {
+    for (Entry entry: entries) {
       entryBuilder.clear();
-      // TODO: this duplicates a lot in HLogKey#getBuilder
+      // TODO: this duplicates a lot in WALKey#getBuilder
       WALProtos.WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder();
-      HLogKey key = entry.getKey();
+      WALKey key = entry.getKey();
       keyBuilder.setEncodedRegionName(
         ByteStringer.wrap(encodedRegionName == null
             ? key.getEncodedRegionName()


Mime
View raw message