hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1482676 [1/5] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ hbase-client/src/main/ja...
Date Wed, 15 May 2013 04:25:58 GMT
Author: stack
Date: Wed May 15 04:25:57 2013
New Revision: 1482676

URL: http://svn.apache.org/r1482676
Log:
HBASE-7006 [MTTR] Improve Region Server Recovery Time - Distributed Log Replay

Added:
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.java
    hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
    hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java
    hbase/branches/0.95/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
    hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java
    hbase/branches/0.95/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALEditsReplay.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java
Modified:
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
    hbase/branches/0.95/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
    hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
    hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
    hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
    hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
    hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
    hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
    hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
    hbase/branches/0.95/hbase-protocol/src/main/protobuf/Admin.proto
    hbase/branches/0.95/hbase-protocol/src/main/protobuf/hbase.proto
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenMasterInitializing.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Wed May 15 04:25:57 2013
@@ -185,6 +185,9 @@ public class HRegionInfo implements Comp
   private byte[] tableName = null;
   private String tableNameAsString = null;
 
+  // when a region is in recovering state, it can only accept writes not reads
+  private volatile boolean recovering = false;
+
   /** HRegionInfo for root region */
   public static final HRegionInfo ROOT_REGIONINFO =
       new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
@@ -293,6 +296,7 @@ public class HRegionInfo implements Comp
     this.startKey = startKey == null?
       HConstants.EMPTY_START_ROW: startKey.clone();
     this.tableName = tableName.clone();
+    this.recovering = false;
     setHashCode();
   }
 
@@ -313,6 +317,7 @@ public class HRegionInfo implements Comp
     this.hashCode = other.hashCode();
     this.encodedName = other.getEncodedName();
     this.tableName = other.tableName;
+    this.recovering = other.isRecovering();
   }
 
 
@@ -597,6 +602,20 @@ public class HRegionInfo implements Comp
   }
 
   /**
+   * @return True if current region is in recovering
+   */
+  public boolean isRecovering() {
+    return this.recovering;
+  }
+
+  /**
+   * @param newState set recovering state
+   */
+  public void setRecovering(boolean newState) {
+    this.recovering = newState;
+  }
+
+  /**
    * @return True if this region is offline.
    */
   public boolean isOffline() {
@@ -833,6 +852,7 @@ public class HRegionInfo implements Comp
     }
     builder.setOffline(info.isOffline());
     builder.setSplit(info.isSplit());
+    builder.setRecovering(info.isRecovering());
     return builder.build();
   }
 
@@ -865,6 +885,9 @@ public class HRegionInfo implements Comp
     if (proto.hasOffline()) {
       hri.setOffline(proto.getOffline());
     }
+    if (proto.hasRecovering()) {
+      hri.setRecovering(proto.getRecovering());
+    }
     return hri;
   }
 

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java Wed May 15 04:25:57 2013
@@ -18,6 +18,14 @@
 
 package org.apache.hadoop.hbase.client;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.UUID;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
@@ -31,14 +39,6 @@ import org.apache.hadoop.hbase.io.HeapSi
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-import java.util.UUID;
-
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable,
@@ -63,6 +63,7 @@ public abstract class Mutation extends O
   protected byte [] row = null;
   protected long ts = HConstants.LATEST_TIMESTAMP;
   protected Durability durability = Durability.USE_DEFAULT;
+  
   // A Map sorted by column family.
   protected NavigableMap<byte [], List<? extends Cell>> familyMap =
     new TreeMap<byte [], List<? extends Cell>>(Bytes.BYTES_COMPARATOR);

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java Wed May 15 04:25:57 2013
@@ -178,7 +178,7 @@ public abstract class ServerCallable<T> 
         if (t instanceof SocketTimeoutException ||
             t instanceof ConnectException ||
             t instanceof RetriesExhaustedException ||
-            getConnection().isDeadServer(location.getServerName())) {
+            (location != null && getConnection().isDeadServer(location.getServerName()))) {
           // if thrown these exceptions, we clear all the cache entries that
           // map to that slow/dead server; otherwise, let cache miss and ask
           // .META. again to find the new location
@@ -261,7 +261,9 @@ public abstract class ServerCallable<T> 
    */
   protected static Throwable translateException(Throwable t) throws DoNotRetryIOException {
     if (t instanceof UndeclaredThrowableException) {
-      t = t.getCause();
+      if(t.getCause() != null) {
+        t = t.getCause();
+      }
     }
     if (t instanceof RemoteException) {
       t = ((RemoteException)t).unwrapRemoteException();

Added: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.java?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.java (added)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.java Wed May 15 04:25:57 2013
@@ -0,0 +1,45 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Thrown when a read request issued against a region which is in recovering state.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RegionInRecoveryException extends NotServingRegionException {
+  private static final long serialVersionUID = 327302071153799L;
+
+  /** default constructor */
+  public RegionInRecoveryException() {
+    super();
+  }
+
+  /**
+   * Constructor
+   * @param s message
+   */
+  public RegionInRecoveryException(String s) {
+    super(s);
+  }
+
+}

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeper.java Wed May 15 04:25:57 2013
@@ -432,6 +432,30 @@ public class ReplicationZookeeper implem
   }
 
   /**
+   * @param position
+   * @return Serialized protobuf of <code>position</code> with pb magic prefix
+   *         prepended suitable for use as content of an hlog position in a
+   *         replication queue.
+   */
+  public static byte[] positionToByteArray(
+      final long position) {
+    return ZKUtil.positionToByteArray(position);
+  }
+
+  /**
+   * @param lockOwner
+   * @return Serialized protobuf of <code>lockOwner</code> with pb magic prefix
+   *         prepended suitable for use as content of an replication lock during
+   *         region server fail over.
+   */
+  static byte[] lockToByteArray(
+      final String lockOwner) {
+    byte[] bytes = ZooKeeperProtos.ReplicationLock.newBuilder().setLockOwner(lockOwner).build()
+        .toByteArray();
+    return ProtobufUtil.prependPBMagic(bytes);
+  }
+
+  /**
    * @param bytes Content of a peer znode.
    * @return ClusterKey parsed from the passed bytes.
    * @throws DeserializationException
@@ -476,6 +500,42 @@ public class ReplicationZookeeper implem
     }
   }
 
+  /**
+   * @param bytes - Content of a HLog position znode.
+   * @return long - The current HLog position.
+   * @throws DeserializationException
+   */
+  public static long parseHLogPositionFrom(
+      final byte[] bytes) throws DeserializationException {
+    return ZKUtil.parseHLogPositionFrom(bytes);
+  }
+
+  /**
+   * @param bytes - Content of a lock znode.
+   * @return String - The owner of the lock.
+   * @throws DeserializationException
+   */
+  static String parseLockOwnerFrom(
+      final byte[] bytes) throws DeserializationException {
+    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+      int pblen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.ReplicationLock.Builder builder = ZooKeeperProtos.ReplicationLock
+          .newBuilder();
+      ZooKeeperProtos.ReplicationLock lock;
+      try {
+        lock = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+      } catch (InvalidProtocolBufferException e) {
+        throw new DeserializationException(e);
+      }
+      return lock.getLockOwner();
+    } else {
+      if (bytes.length > 0) {
+        return Bytes.toString(bytes);
+      }
+      return "";
+    }
+  }
+
   private boolean peerExists(String id) throws KeeperException {
     return ZKUtil.checkExists(this.zookeeper,
           ZKUtil.joinZNode(this.peersZNode, id)) >= 0;

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java Wed May 15 04:25:57 2013
@@ -376,6 +376,7 @@ public class ZKTable {
     Set<String> allTables = new HashSet<String>();
     List<String> children =
       ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
+    if(children == null) return allTables;
     for (String child: children) {
       ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, child);
       for (ZooKeeperProtos.Table.State expectedState: states) {

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Wed May 15 04:25:57 2013
@@ -27,6 +27,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
@@ -50,6 +52,8 @@ import org.apache.zookeeper.proto.Delete
 import org.apache.zookeeper.proto.SetDataRequest;
 import org.apache.zookeeper.server.ZooKeeperSaslServer;
 
+import com.google.protobuf.InvalidProtocolBufferException;
+
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
 import java.io.BufferedReader;
@@ -1815,4 +1819,39 @@ public class ZKUtil {
     }
   }
 
+  /**
+   * @param position
+   * @return Serialized protobuf of <code>position</code> with pb magic prefix prepended suitable
+   *         for use as content of an hlog position in a replication queue.
+   */
+  public static byte[] positionToByteArray(final long position) {
+    byte[] bytes = ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(position)
+        .build().toByteArray();
+    return ProtobufUtil.prependPBMagic(bytes);
+  }
+
+  /**
+   * @param bytes - Content of a HLog position znode.
+   * @return long - The current HLog position.
+   * @throws DeserializationException
+   */
+  public static long parseHLogPositionFrom(final byte[] bytes) throws DeserializationException {
+    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+      int pblen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.ReplicationHLogPosition.Builder builder = 
+          ZooKeeperProtos.ReplicationHLogPosition.newBuilder();
+      ZooKeeperProtos.ReplicationHLogPosition position;
+      try {
+        position = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+      } catch (InvalidProtocolBufferException e) {
+        throw new DeserializationException(e);
+      }
+      return position.getPosition();
+    } else {
+      if (bytes.length > 0) {
+        return Bytes.toLong(bytes);
+      }
+      return 0;
+    }
+  }
 }

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java Wed May 15 04:25:57 2013
@@ -107,6 +107,8 @@ public class ZooKeeperWatcher implements
   public String balancerZNode;
   // znode containing the lock for the tables
   public String tableLockZNode;
+  // znode containing the state of recovering regions
+  public String recoveringRegionsZNode;
 
   // Certain ZooKeeper nodes need to be world-readable
   public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
@@ -133,11 +135,11 @@ public class ZooKeeperWatcher implements
 
   /**
    * Instantiate a ZooKeeper connection and watcher.
-   * @param identifier string that is passed to RecoverableZookeeper to be used as
-   * identifier for this instance. Use null for default.
    * @param conf
+   * @param identifier string that is passed to RecoverableZookeeper to be used as identifier for
+   *          this instance. Use null for default.
    * @param abortable Can be null if there is on error there is no host to abort: e.g. client
-   * context.
+   *          context.
    * @param canCreateBaseZNode
    * @throws IOException
    * @throws ZooKeeperConnectionException
@@ -176,6 +178,7 @@ public class ZooKeeperWatcher implements
       ZKUtil.createAndFailSilent(this, splitLogZNode);
       ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
       ZKUtil.createAndFailSilent(this, tableLockZNode);
+      ZKUtil.createAndFailSilent(this, recoveringRegionsZNode);
     } catch (KeeperException e) {
       throw new ZooKeeperConnectionException(
           prefix("Unexpected KeeperException creating base node"), e);
@@ -227,6 +230,8 @@ public class ZooKeeperWatcher implements
         conf.get("zookeeper.znode.balancer", "balancer"));
     tableLockZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.tableLock", "table-lock"));
+    recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
+      conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
   }
 
   /**

Modified: hbase/branches/0.95/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.95/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Wed May 15 04:25:57 2013
@@ -717,6 +717,14 @@ public final class HConstants {
   public static final String DISTRIBUTED_LOG_SPLITTING_KEY =
       "hbase.master.distributed.log.splitting";
 
+  /** Conf key that enables unflushed WAL edits directly being replayed to region servers */
+  public static final String DISTRIBUTED_LOG_REPLAY_KEY = "hbase.master.distributed.log.replay";
+  public static final boolean DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG = true;
+
+  /** Conf key that specifies timeout value to wait for a region ready */
+  public static final String LOG_REPLAY_WAIT_REGION_TIMEOUT = 
+      "hbase.master.log.replay.wait.region.timeout";
+
   /**
    * The name of the configuration parameter that specifies
    * the number of bytes in a newly created checksum chunk.
@@ -767,6 +775,7 @@ public final class HConstants {
   public static final int QOS_THRESHOLD = 10;
   public static final int HIGH_QOS = 100;
   public static final int REPLICATION_QOS = 5; // normal_QOS < replication_QOS < high_QOS
+  public static final int REPLAY_QOS = 6; // REPLICATION_QOS < REPLAY_QOS < high_QOS
 
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = ".archive";

Modified: hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java (original)
+++ hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java Wed May 15 04:25:57 2013
@@ -60,6 +60,8 @@ public interface MetricsMasterSource ext
   static final String SNAPSHOT_TIME_NAME = "snapshotTime";
   static final String SNAPSHOT_RESTORE_TIME_NAME = "snapshotRestoreTime";
   static final String SNAPSHOT_CLONE_TIME_NAME = "snapshotCloneTime";
+  static final String META_SPLIT_TIME_NAME = "metaHlogSplitTime";
+  static final String META_SPLIT_SIZE_NAME = "metaHlogSplitSize";
   static final String CLUSTER_REQUESTS_NAME = "clusterRequests";
   static final String RIT_COUNT_NAME = "ritCount";
   static final String RIT_COUNT_OVER_THRESHOLD_NAME = "ritCountOverThreshold";
@@ -78,7 +80,8 @@ public interface MetricsMasterSource ext
   static final String SNAPSHOT_TIME_DESC = "Time it takes to finish snapshot()";
   static final String SNAPSHOT_RESTORE_TIME_DESC = "Time it takes to finish restoreSnapshot()";
   static final String SNAPSHOT_CLONE_TIME_DESC = "Time it takes to finish cloneSnapshot()";
-
+  static final String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
+  static final String META_SPLIT_SIZE_DESC = "Size of META HLog files being split";
 
   /**
    * Increment the number of requests the cluster has seen.
@@ -117,4 +120,9 @@ public interface MetricsMasterSource ext
   void updateSnapshotCloneTime(long time);
 
   void updateSnapshotRestoreTime(long time);
+  
+  void updateMetaWALSplitTime(long time);
+
+  void updateMetaWALSplitSize(long size);
+
 }

Modified: hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java (original)
+++ hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java Wed May 15 04:25:57 2013
@@ -81,6 +81,13 @@ public interface MetricsRegionServerSour
   void updateAppend(long t);
 
   /**
+   * Update the Replay time histogram.
+   *
+   * @param t time it took
+   */
+  void updateReplay(long t);
+
+  /**
    * Increment the number of slow Puts that have happened.
    */
   void incrSlowPut();
@@ -188,6 +195,7 @@ public interface MetricsRegionServerSour
   static final String INCREMENT_KEY = "increment";
   static final String MUTATE_KEY = "mutate";
   static final String APPEND_KEY = "append";
+  static final String REPLAY_KEY = "replay";
   static final String SCAN_NEXT_KEY = "scanNext";
   static final String SLOW_MUTATE_KEY = "slowPutCount";
   static final String SLOW_GET_KEY = "slowGetCount";

Added: hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java (added)
+++ hbase/branches/0.95/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java Wed May 15 04:25:57 2013
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+
+/**
+ * Interface of the source that will export metrics about log replay statistics when recovering a
+ * region server in distributedLogReplay mode
+ */
+public interface MetricsEditsReplaySource extends BaseSource {
+
+  /**
+   * The name of the metrics
+   */
+  static final String METRICS_NAME = "replay";
+
+  /**
+   * The name of the metrics context that metrics will be under.
+   */
+  static final String METRICS_CONTEXT = "regionserver";
+
+  /**
+   * Description
+   */
+  static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay";
+
+  /**
+   * The name of the metrics context that metrics will be under in jmx
+   */
+  static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+
+  static final String REPLAY_TIME_NAME = "replayTime";
+  static final String REPLAY_TIME_DESC = "Time an replay operation took.";
+  static final String REPLAY_BATCH_SIZE_NAME = "replayBatchSize";
+  static final String REPLAY_BATCH_SIZE_DESC = "Number of changes in each replay batch.";
+  static final String REPLAY_DATA_SIZE_NAME = "replayDataSize";
+  static final String REPLAY_DATA_SIZE_DESC = "Size (in bytes) of the data of each replay.";
+
+  /**
+   * Add the time a replay command took
+   */
+  void updateReplayTime(long time);
+
+  /**
+   * Add the batch size of each replay
+   */
+  void updateReplayBatchSize(long size);
+
+  /**
+   * Add the payload data size of each replay
+   */
+  void updateReplayDataSize(long size);
+
+}

Modified: hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java (original)
+++ hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java Wed May 15 04:25:57 2013
@@ -48,6 +48,8 @@ public class MetricsMasterSourceImpl
   private MetricMutableStat snapshotTimeHisto;
   private MetricMutableStat snapshotCloneTimeHisto;
   private MetricMutableStat snapshotRestoreTimeHisto;
+  private MetricMutableHistogram metaSplitTimeHisto;
+  private MetricMutableHistogram metaSplitSizeHisto;
 
   public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
     this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper);
@@ -77,6 +79,8 @@ public class MetricsMasterSourceImpl
         SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC, "Ops", "Time", true);
     snapshotRestoreTimeHisto = metricsRegistry.newStat(
         SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC, "Ops", "Time", true);
+    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
+    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
   }
 
   public void incRequests(final int inc) {
@@ -120,6 +124,16 @@ public class MetricsMasterSourceImpl
     snapshotRestoreTimeHisto.add(time);
   }
 
+  @Override
+  public void updateMetaWALSplitTime(long time) {
+    metaSplitTimeHisto.add(time);
+  }
+
+  @Override
+  public void updateMetaWALSplitSize(long size) {
+    metaSplitSizeHisto.add(size);
+  }
+
   /**
    * Method to export all the metrics.
    *

Modified: hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java (original)
+++ hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java Wed May 15 04:25:57 2013
@@ -38,6 +38,7 @@ public class MetricsRegionServerSourceIm
   private final MetricHistogram getHisto;
   private final MetricHistogram incrementHisto;
   private final MetricHistogram appendHisto;
+  private final MetricHistogram replayHisto;
   private final MetricMutableCounterLong slowPut;
   private final MetricMutableCounterLong slowDelete;
   private final MetricMutableCounterLong slowGet;
@@ -70,6 +71,8 @@ public class MetricsRegionServerSourceIm
 
     appendHisto = getMetricsRegistry().newHistogram(APPEND_KEY);
     slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0l);
+
+    replayHisto = getMetricsRegistry().newHistogram(REPLAY_KEY);
   }
 
   @Override
@@ -98,6 +101,11 @@ public class MetricsRegionServerSourceIm
   }
 
   @Override
+  public void updateReplay(long t) {
+    replayHisto.add(t);
+  }
+
+  @Override
   public void incrSlowPut() {
     slowPut.incr();
   }

Added: hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java (added)
+++ hbase/branches/0.95/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java Wed May 15 04:25:57 2013
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
+
+/**
+ * Hadoop1 implementation of MetricsEditsReplaySource.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
+ */
+public class MetricsEditsReplaySourceImpl
+    extends BaseSourceImpl implements MetricsEditsReplaySource {
+
+  private static final Log LOG = LogFactory.getLog(MetricsEditsReplaySourceImpl.class.getName());
+
+  private MetricMutableHistogram replayTimeHisto;
+  private MetricMutableHistogram replayBatchSizeHisto;
+  private MetricMutableHistogram replayDataSizeHisto;
+
+  public MetricsEditsReplaySourceImpl() {
+    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+  }
+
+  public MetricsEditsReplaySourceImpl(String metricsName,
+                                 String metricsDescription,
+                                 String metricsContext,
+                                 String metricsJmxContext) {
+    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+  }
+
+  @Override
+  public void init() {
+    super.init();
+    replayTimeHisto = metricsRegistry.newHistogram(REPLAY_TIME_NAME, REPLAY_TIME_DESC);
+    replayBatchSizeHisto = 
+      metricsRegistry.newHistogram(REPLAY_BATCH_SIZE_NAME, REPLAY_BATCH_SIZE_DESC);
+    replayDataSizeHisto = 
+      metricsRegistry.newHistogram(REPLAY_DATA_SIZE_NAME, REPLAY_DATA_SIZE_DESC);
+  }
+
+  @Override
+  public void updateReplayTime(long time) {
+    replayTimeHisto.add(time);
+  }
+
+  @Override
+  public void updateReplayBatchSize(long size) {
+    replayBatchSizeHisto.add(size);
+  }
+
+  @Override
+  public void updateReplayDataSize(long size) {
+    replayDataSizeHisto.add(size);
+  }
+}

Added: hbase/branches/0.95/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource (added)
+++ hbase/branches/0.95/hbase-hadoop1-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource Wed May 15 04:25:57 2013
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySourceImpl

Modified: hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java (original)
+++ hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java Wed May 15 04:25:57 2013
@@ -45,6 +45,8 @@ public class MetricsMasterSourceImpl
   private MutableStat snapshotTimeHisto;
   private MutableStat snapshotCloneTimeHisto;
   private MutableStat snapshotRestoreTimeHisto;
+  private MutableHistogram metaSplitTimeHisto;
+  private MutableHistogram metaSplitSizeHisto;
 
   public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) {
     this(METRICS_NAME,
@@ -79,6 +81,8 @@ public class MetricsMasterSourceImpl
         SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC, "Ops", "Time", true);
     snapshotRestoreTimeHisto = metricsRegistry.newStat(
         SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC, "Ops", "Time", true);
+    metaSplitTimeHisto = metricsRegistry.newHistogram(META_SPLIT_TIME_NAME, META_SPLIT_TIME_DESC);
+    metaSplitSizeHisto = metricsRegistry.newHistogram(META_SPLIT_SIZE_NAME, META_SPLIT_SIZE_DESC);
   }
 
   public void incRequests(final int inc) {
@@ -123,6 +127,16 @@ public class MetricsMasterSourceImpl
   }
 
   @Override
+  public void updateMetaWALSplitTime(long time) {
+    metaSplitTimeHisto.add(time);
+  }
+
+  @Override
+  public void updateMetaWALSplitSize(long size) {
+    metaSplitSizeHisto.add(size);
+  }
+
+  @Override
   public void getMetrics(MetricsCollector metricsCollector, boolean all) {
 
     MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName)

Modified: hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java (original)
+++ hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java Wed May 15 04:25:57 2013
@@ -41,6 +41,7 @@ public class MetricsRegionServerSourceIm
   private final MetricHistogram getHisto;
   private final MetricHistogram incrementHisto;
   private final MetricHistogram appendHisto;
+  private final MetricHistogram replayHisto;
 
   private final MutableCounterLong slowPut;
   private final MutableCounterLong slowDelete;
@@ -75,6 +76,8 @@ public class MetricsRegionServerSourceIm
 
     appendHisto = getMetricsRegistry().newHistogram(APPEND_KEY);
     slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0l);
+    
+    replayHisto = getMetricsRegistry().newHistogram(REPLAY_KEY);
   }
 
   @Override
@@ -103,6 +106,11 @@ public class MetricsRegionServerSourceIm
   }
 
   @Override
+  public void updateReplay(long t) {
+    replayHisto.add(t);
+  }
+
+  @Override
   public void incrSlowPut() {
    slowPut.incr();
   }

Added: hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java (added)
+++ hbase/branches/0.95/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySourceImpl.java Wed May 15 04:25:57 2013
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+
+/**
+ * Hadoop2 implementation of MetricsEditsReplaySource. Implements BaseSource through
+ * BaseSourceImpl, following the pattern.
+ */
+public class MetricsEditsReplaySourceImpl extends BaseSourceImpl implements
+    MetricsEditsReplaySource {
+
+  private static final Log LOG = LogFactory.getLog(MetricsEditsReplaySourceImpl.class.getName());
+
+  private MetricHistogram replayTimeHisto;
+  private MetricHistogram replayBatchSizeHisto;
+  private MetricHistogram replayDataSizeHisto;
+
+  public MetricsEditsReplaySourceImpl() {
+    this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+  }
+
+  public MetricsEditsReplaySourceImpl(String metricsName,
+                                      String metricsDescription,
+                                      String metricsContext,
+                                      String metricsJmxContext) {
+    super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+  }
+
+  @Override
+  public void init() {
+    super.init();
+    replayTimeHisto = metricsRegistry.newHistogram(REPLAY_TIME_NAME, REPLAY_TIME_DESC);
+    replayBatchSizeHisto = metricsRegistry.newHistogram(REPLAY_BATCH_SIZE_NAME,
+      REPLAY_BATCH_SIZE_DESC);
+    replayDataSizeHisto = metricsRegistry
+        .newHistogram(REPLAY_DATA_SIZE_NAME, REPLAY_DATA_SIZE_DESC);
+  }
+
+  @Override
+  public void updateReplayTime(long time) {
+    replayTimeHisto.add(time);
+  }
+
+  @Override
+  public void updateReplayBatchSize(long size) {
+    replayBatchSizeHisto.add(size);
+  }
+
+  @Override
+  public void updateReplayDataSize(long size) {
+    replayDataSizeHisto.add(size);
+  }
+}

Added: hbase/branches/0.95/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource?rev=1482676&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource (added)
+++ hbase/branches/0.95/hbase-hadoop2-compat/src/main/resources/META-INF/services/org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource Wed May 15 04:25:57 2013
@@ -0,0 +1 @@
+org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySourceImpl

Modified: hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java?rev=1482676&r1=1482675&r2=1482676&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java (original)
+++ hbase/branches/0.95/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java Wed May 15 04:25:57 2013
@@ -14155,6 +14155,11 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse> done);
       
+      public abstract void replay(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse> done);
+      
       public abstract void rollWALWriter(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request,
@@ -14256,6 +14261,14 @@ public final class AdminProtos {
         }
         
         @java.lang.Override
+        public  void replay(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse> done) {
+          impl.replay(controller, request, done);
+        }
+        
+        @java.lang.Override
         public  void rollWALWriter(
             com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request,
@@ -14322,10 +14335,12 @@ public final class AdminProtos {
             case 9:
               return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
             case 10:
-              return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request);
+              return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest)request);
             case 11:
-              return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request);
+              return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request);
             case 12:
+              return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request);
+            case 13:
               return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -14362,10 +14377,12 @@ public final class AdminProtos {
             case 9:
               return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
             case 10:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.getDefaultInstance();
             case 11:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
             case 12:
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+            case 13:
               return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -14402,10 +14419,12 @@ public final class AdminProtos {
             case 9:
               return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
             case 10:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance();
             case 11:
-              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
             case 12:
+              return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+            case 13:
               return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -14465,6 +14484,11 @@ public final class AdminProtos {
         org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse> done);
     
+    public abstract void replay(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse> done);
+    
     public abstract void rollWALWriter(
         com.google.protobuf.RpcController controller,
         org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request,
@@ -14553,16 +14577,21 @@ public final class AdminProtos {
               done));
           return;
         case 10:
+          this.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse>specializeCallback(
+              done));
+          return;
+        case 11:
           this.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse>specializeCallback(
               done));
           return;
-        case 11:
+        case 12:
           this.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse>specializeCallback(
               done));
           return;
-        case 12:
+        case 13:
           this.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request,
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse>specializeCallback(
               done));
@@ -14602,10 +14631,12 @@ public final class AdminProtos {
         case 9:
           return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
         case 10:
-          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.getDefaultInstance();
         case 11:
-          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
         case 12:
+          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
+        case 13:
           return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -14642,10 +14673,12 @@ public final class AdminProtos {
         case 9:
           return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
         case 10:
-          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance();
         case 11:
-          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
         case 12:
+          return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
+        case 13:
           return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -14818,12 +14851,27 @@ public final class AdminProtos {
             org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()));
       }
       
+      public  void replay(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(10),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance()));
+      }
+      
       public  void rollWALWriter(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(10),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(),
@@ -14838,7 +14886,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(),
@@ -14853,7 +14901,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(),
@@ -14920,6 +14968,11 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
           throws com.google.protobuf.ServiceException;
       
+      public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse replay(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request)
+          throws com.google.protobuf.ServiceException;
+      
       public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse rollWALWriter(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request)
@@ -15063,12 +15116,24 @@ public final class AdminProtos {
       }
       
       
+      public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse replay(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(10),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.getDefaultInstance());
+      }
+      
+      
       public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse rollWALWriter(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(10),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance());
@@ -15080,7 +15145,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance());
@@ -15092,7 +15157,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance());
@@ -15255,78 +15320,80 @@ public final class AdminProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\013Admin.proto\032\013hbase.proto\032\tWAL.proto\"Q\n" +
-      "\024GetRegionInfoRequest\022 \n\006region\030\001 \002(\0132\020." +
-      "RegionSpecifier\022\027\n\017compactionState\030\002 \001(\010" +
-      "\"\301\001\n\025GetRegionInfoResponse\022\037\n\nregionInfo" +
-      "\030\001 \002(\0132\013.RegionInfo\022?\n\017compactionState\030\002" +
-      " \001(\0162&.GetRegionInfoResponse.CompactionS" +
-      "tate\"F\n\017CompactionState\022\010\n\004NONE\020\000\022\t\n\005MIN" +
-      "OR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"G\n\023" +
-      "GetStoreFileRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
-      "gionSpecifier\022\016\n\006family\030\002 \003(\014\")\n\024GetStor",
-      "eFileResponse\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetO" +
-      "nlineRegionRequest\":\n\027GetOnlineRegionRes" +
-      "ponse\022\037\n\nregionInfo\030\001 \003(\0132\013.RegionInfo\"\270" +
-      "\001\n\021OpenRegionRequest\0223\n\010openInfo\030\001 \003(\0132!" +
-      ".OpenRegionRequest.RegionOpenInfo\032n\n\016Reg" +
-      "ionOpenInfo\022\033\n\006region\030\001 \002(\0132\013.RegionInfo" +
-      "\022\034\n\024versionOfOfflineNode\030\002 \001(\r\022!\n\014favore" +
-      "dNodes\030\003 \003(\0132\013.ServerName\"\234\001\n\022OpenRegion" +
-      "Response\022<\n\014openingState\030\001 \003(\0162&.OpenReg" +
-      "ionResponse.RegionOpeningState\"H\n\022Region",
-      "OpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPEN" +
-      "ED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegion" +
-      "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
-      "r\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016trans" +
-      "itionInZK\030\003 \001(\010:\004true\022&\n\021destinationServ" +
-      "er\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionResp" +
-      "onse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReque" +
-      "st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\r" +
-      "ifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionRespon" +
-      "se\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001",
-      "(\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002(\013" +
-      "2\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025" +
-      "\n\023SplitRegionResponse\"W\n\024CompactRegionRe" +
-      "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" +
-      "\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025Compac" +
-      "tRegionResponse\"t\n\023MergeRegionsRequest\022!" +
-      "\n\007regionA\030\001 \002(\0132\020.RegionSpecifier\022!\n\007reg" +
-      "ionB\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible" +
-      "\030\003 \001(\010:\005false\"\026\n\024MergeRegionsResponse\"7\n" +
-      "\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\025\n\rkeyVa",
-      "lueBytes\030\002 \003(\014\"4\n\030ReplicateWALEntryReque" +
-      "st\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replicat" +
-      "eWALEntryResponse\"\026\n\024RollWALWriterReques" +
-      "t\".\n\025RollWALWriterResponse\022\025\n\rregionToFl" +
-      "ush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason" +
-      "\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServe" +
-      "rInfoRequest\"@\n\nServerInfo\022\037\n\nserverName" +
-      "\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8" +
-      "\n\025GetServerInfoResponse\022\037\n\nserverInfo\030\001 " +
-      "\002(\0132\013.ServerInfo2\266\006\n\014AdminService\022>\n\rget",
-      "RegionInfo\022\025.GetRegionInfoRequest\032\026.GetR" +
-      "egionInfoResponse\022;\n\014getStoreFile\022\024.GetS" +
-      "toreFileRequest\032\025.GetStoreFileResponse\022D" +
-      "\n\017getOnlineRegion\022\027.GetOnlineRegionReque" +
-      "st\032\030.GetOnlineRegionResponse\0225\n\nopenRegi" +
-      "on\022\022.OpenRegionRequest\032\023.OpenRegionRespo" +
-      "nse\0228\n\013closeRegion\022\023.CloseRegionRequest\032" +
-      "\024.CloseRegionResponse\0228\n\013flushRegion\022\023.F" +
-      "lushRegionRequest\032\024.FlushRegionResponse\022" +
-      "8\n\013splitRegion\022\023.SplitRegionRequest\032\024.Sp",
-      "litRegionResponse\022>\n\rcompactRegion\022\025.Com" +
-      "pactRegionRequest\032\026.CompactRegionRespons" +
-      "e\022;\n\014mergeRegions\022\024.MergeRegionsRequest\032" +
-      "\025.MergeRegionsResponse\022J\n\021replicateWALEn" +
-      "try\022\031.ReplicateWALEntryRequest\032\032.Replica" +
-      "teWALEntryResponse\022>\n\rrollWALWriter\022\025.Ro" +
-      "llWALWriterRequest\032\026.RollWALWriterRespon" +
-      "se\022>\n\rgetServerInfo\022\025.GetServerInfoReque" +
-      "st\032\026.GetServerInfoResponse\0225\n\nstopServer" +
-      "\022\022.StopServerRequest\032\023.StopServerRespons",
-      "eBA\n*org.apache.hadoop.hbase.protobuf.ge" +
-      "neratedB\013AdminProtosH\001\210\001\001\240\001\001"
+      "\n\013Admin.proto\032\014Client.proto\032\013hbase.proto" +
+      "\032\tWAL.proto\"Q\n\024GetRegionInfoRequest\022 \n\006r" +
+      "egion\030\001 \002(\0132\020.RegionSpecifier\022\027\n\017compact" +
+      "ionState\030\002 \001(\010\"\301\001\n\025GetRegionInfoResponse" +
+      "\022\037\n\nregionInfo\030\001 \002(\0132\013.RegionInfo\022?\n\017com" +
+      "pactionState\030\002 \001(\0162&.GetRegionInfoRespon" +
+      "se.CompactionState\"F\n\017CompactionState\022\010\n" +
+      "\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_A" +
+      "ND_MINOR\020\003\"G\n\023GetStoreFileRequest\022 \n\006reg" +
+      "ion\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006family\030\002 ",
+      "\003(\014\")\n\024GetStoreFileResponse\022\021\n\tstoreFile" +
+      "\030\001 \003(\t\"\030\n\026GetOnlineRegionRequest\":\n\027GetO" +
+      "nlineRegionResponse\022\037\n\nregionInfo\030\001 \003(\0132" +
+      "\013.RegionInfo\"\270\001\n\021OpenRegionRequest\0223\n\010op" +
+      "enInfo\030\001 \003(\0132!.OpenRegionRequest.RegionO" +
+      "penInfo\032n\n\016RegionOpenInfo\022\033\n\006region\030\001 \002(" +
+      "\0132\013.RegionInfo\022\034\n\024versionOfOfflineNode\030\002" +
+      " \001(\r\022!\n\014favoredNodes\030\003 \003(\0132\013.ServerName\"" +
+      "\234\001\n\022OpenRegionResponse\022<\n\014openingState\030\001" +
+      " \003(\0162&.OpenRegionResponse.RegionOpeningS",
+      "tate\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022" +
+      "\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232" +
+      "\001\n\022CloseRegionRequest\022 \n\006region\030\001 \002(\0132\020." +
+      "RegionSpecifier\022\034\n\024versionOfClosingNode\030" +
+      "\002 \001(\r\022\034\n\016transitionInZK\030\003 \001(\010:\004true\022&\n\021d" +
+      "estinationServer\030\004 \001(\0132\013.ServerName\"%\n\023C" +
+      "loseRegionResponse\022\016\n\006closed\030\001 \002(\010\"M\n\022Fl" +
+      "ushRegionRequest\022 \n\006region\030\001 \002(\0132\020.Regio" +
+      "nSpecifier\022\025\n\rifOlderThanTs\030\002 \001(\004\"=\n\023Flu" +
+      "shRegionResponse\022\025\n\rlastFlushTime\030\001 \002(\004\022",
+      "\017\n\007flushed\030\002 \001(\010\"J\n\022SplitRegionRequest\022 " +
+      "\n\006region\030\001 \002(\0132\020.RegionSpecifier\022\022\n\nspli" +
+      "tPoint\030\002 \001(\014\"\025\n\023SplitRegionResponse\"W\n\024C" +
+      "ompactRegionRequest\022 \n\006region\030\001 \002(\0132\020.Re" +
+      "gionSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003" +
+      " \001(\014\"\027\n\025CompactRegionResponse\"t\n\023MergeRe" +
+      "gionsRequest\022!\n\007regionA\030\001 \002(\0132\020.RegionSp" +
+      "ecifier\022!\n\007regionB\030\002 \002(\0132\020.RegionSpecifi" +
+      "er\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024MergeRegi" +
+      "onsResponse\"7\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.W",
+      "ALKey\022\025\n\rkeyValueBytes\030\002 \003(\014\"4\n\030Replicat" +
+      "eWALEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEnt" +
+      "ry\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollW" +
+      "ALWriterRequest\".\n\025RollWALWriterResponse" +
+      "\022\025\n\rregionToFlush\030\001 \003(\014\"#\n\021StopServerReq" +
+      "uest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespon" +
+      "se\"\026\n\024GetServerInfoRequest\"@\n\nServerInfo" +
+      "\022\037\n\nserverName\030\001 \002(\0132\013.ServerName\022\021\n\tweb" +
+      "uiPort\030\002 \001(\r\"8\n\025GetServerInfoResponse\022\037\n" +
+      "\nserverInfo\030\001 \002(\0132\013.ServerInfo2\337\006\n\014Admin",
+      "Service\022>\n\rgetRegionInfo\022\025.GetRegionInfo" +
+      "Request\032\026.GetRegionInfoResponse\022;\n\014getSt" +
+      "oreFile\022\024.GetStoreFileRequest\032\025.GetStore" +
+      "FileResponse\022D\n\017getOnlineRegion\022\027.GetOnl" +
+      "ineRegionRequest\032\030.GetOnlineRegionRespon" +
+      "se\0225\n\nopenRegion\022\022.OpenRegionRequest\032\023.O" +
+      "penRegionResponse\0228\n\013closeRegion\022\023.Close" +
+      "RegionRequest\032\024.CloseRegionResponse\0228\n\013f" +
+      "lushRegion\022\023.FlushRegionRequest\032\024.FlushR" +
+      "egionResponse\0228\n\013splitRegion\022\023.SplitRegi",
+      "onRequest\032\024.SplitRegionResponse\022>\n\rcompa" +
+      "ctRegion\022\025.CompactRegionRequest\032\026.Compac" +
+      "tRegionResponse\022;\n\014mergeRegions\022\024.MergeR" +
+      "egionsRequest\032\025.MergeRegionsResponse\022J\n\021" +
+      "replicateWALEntry\022\031.ReplicateWALEntryReq" +
+      "uest\032\032.ReplicateWALEntryResponse\022\'\n\006repl" +
+      "ay\022\r.MultiRequest\032\016.MultiResponse\022>\n\rrol" +
+      "lWALWriter\022\025.RollWALWriterRequest\032\026.Roll" +
+      "WALWriterResponse\022>\n\rgetServerInfo\022\025.Get" +
+      "ServerInfoRequest\032\026.GetServerInfoRespons",
+      "e\0225\n\nstopServer\022\022.StopServerRequest\032\023.St" +
+      "opServerResponseBA\n*org.apache.hadoop.hb" +
+      "ase.protobuf.generatedB\013AdminProtosH\001\210\001\001" +
+      "\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -15571,6 +15638,7 @@ public final class AdminProtos {
     com.google.protobuf.Descriptors.FileDescriptor
       .internalBuildGeneratedFileFrom(descriptorData,
         new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.WALProtos.getDescriptor(),
         }, assigner);



Mime
View raw message