hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1478637 [4/9] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/apac...
Date Fri, 03 May 2013 03:52:18 GMT
Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java Fri May  3 03:52:15 2013
@@ -46,11 +46,8 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.MasterAdminProtocol;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -67,6 +64,7 @@ import org.apache.hadoop.hbase.filter.Fi
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -85,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
@@ -105,6 +104,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.security.access.Permission;
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.util.Dyna
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.common.collect.ArrayListMultimap;
@@ -222,6 +223,9 @@ public final class ProtobufUtil {
     if (e == null) {
       return new IOException(se);
     }
+    if (e instanceof RemoteException) {
+      e = ((RemoteException)e).unwrapRemoteException();
+    }
     return e instanceof IOException ? (IOException) e : new IOException(se);
   }
 
@@ -1206,7 +1210,7 @@ public final class ProtobufUtil {
    * @return the result of the Get
    * @throws IOException
    */
-  public static Result get(final ClientProtocol client,
+  public static Result get(final ClientService.BlockingInterface client,
       final byte[] regionName, final Get get) throws IOException {
     GetRequest request =
       RequestConverter.buildGetRequest(regionName, get);
@@ -1229,7 +1233,7 @@ public final class ProtobufUtil {
    * @return the row or the closestRowBefore if it doesn't exist
    * @throws IOException
    */
-  public static Result getRowOrBefore(final ClientProtocol client,
+  public static Result getRowOrBefore(final ClientService.BlockingInterface client,
       final byte[] regionName, final byte[] row,
       final byte[] family) throws IOException {
     GetRequest request =
@@ -1254,7 +1258,7 @@ public final class ProtobufUtil {
    * @return true if all are loaded
    * @throws IOException
    */
-  public static boolean bulkLoadHFile(final ClientProtocol client,
+  public static boolean bulkLoadHFile(final ClientService.BlockingInterface client,
       final List<Pair<byte[], String>> familyPaths,
       final byte[] regionName, boolean assignSeqNum) throws IOException {
     BulkLoadHFileRequest request =
@@ -1268,7 +1272,7 @@ public final class ProtobufUtil {
     }
   }
 
-  public static CoprocessorServiceResponse execService(final ClientProtocol client,
+  public static CoprocessorServiceResponse execService(final ClientService.BlockingInterface client,
       final CoprocessorServiceCall call, final byte[] regionName) throws IOException {
     CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
         .setCall(call).setRegion(
@@ -1282,8 +1286,9 @@ public final class ProtobufUtil {
     }
   }
 
-  public static CoprocessorServiceResponse execService(final MasterAdminProtocol client,
-      final CoprocessorServiceCall call) throws IOException {
+  public static CoprocessorServiceResponse execService(
+    final MasterAdminService.BlockingInterface client, final CoprocessorServiceCall call)
+  throws IOException {
     CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
         .setCall(call).setRegion(
             RequestConverter.buildRegionSpecifier(REGION_NAME, HConstants.EMPTY_BYTE_ARRAY)).build();
@@ -1315,7 +1320,7 @@ public final class ProtobufUtil {
    * @return the retrieved region info
    * @throws IOException
    */
-  public static HRegionInfo getRegionInfo(final AdminProtocol admin,
+  public static HRegionInfo getRegionInfo(final AdminService.BlockingInterface admin,
       final byte[] regionName) throws IOException {
     try {
       GetRegionInfoRequest request =
@@ -1337,7 +1342,7 @@ public final class ProtobufUtil {
    * @param transitionInZK
    * @throws IOException
    */
-  public static void closeRegion(final AdminProtocol admin,
+  public static void closeRegion(final AdminService.BlockingInterface admin,
       final byte[] regionName, final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
       RequestConverter.buildCloseRegionRequest(regionName, transitionInZK);
@@ -1358,7 +1363,8 @@ public final class ProtobufUtil {
    * @return true if the region is closed
    * @throws IOException
    */
-  public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName,
+  public static boolean closeRegion(final AdminService.BlockingInterface admin,
+      final byte[] regionName,
       final int versionOfClosingNode, final ServerName destinationServer,
       final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
@@ -1379,7 +1385,7 @@ public final class ProtobufUtil {
    * @param region
    * @throws IOException
    */
-  public static void openRegion(final AdminProtocol admin,
+  public static void openRegion(final AdminService.BlockingInterface admin,
       final HRegionInfo region) throws IOException {
     OpenRegionRequest request =
       RequestConverter.buildOpenRegionRequest(region, -1);
@@ -1398,7 +1404,8 @@ public final class ProtobufUtil {
    * @return a list of online region info
    * @throws IOException
    */
-  public static List<HRegionInfo> getOnlineRegions(final AdminProtocol admin) throws IOException {
+  public static List<HRegionInfo> getOnlineRegions(final AdminService.BlockingInterface admin)
+  throws IOException {
     GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest();
     GetOnlineRegionResponse response = null;
     try {
@@ -1431,8 +1438,8 @@ public final class ProtobufUtil {
    * @return the server name
    * @throws IOException
    */
-  public static ServerInfo getServerInfo(
-      final AdminProtocol admin) throws IOException {
+  public static ServerInfo getServerInfo(final AdminService.BlockingInterface admin)
+  throws IOException {
     GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest();
     try {
       GetServerInfoResponse response = admin.getServerInfo(null, request);
@@ -1452,8 +1459,9 @@ public final class ProtobufUtil {
    * @return the list of store files
    * @throws IOException
    */
-  public static List<String> getStoreFiles(final AdminProtocol admin,
-      final byte[] regionName, final byte[] family) throws IOException {
+  public static List<String> getStoreFiles(final AdminService.BlockingInterface admin,
+      final byte[] regionName, final byte[] family)
+  throws IOException {
     GetStoreFileRequest request =
       RequestConverter.buildGetStoreFileRequest(regionName, family);
     try {
@@ -1472,7 +1480,7 @@ public final class ProtobufUtil {
    * @param splitPoint
    * @throws IOException
    */
-  public static void split(final AdminProtocol admin,
+  public static void split(final AdminService.BlockingInterface admin,
       final HRegionInfo hri, byte[] splitPoint) throws IOException {
     SplitRegionRequest request =
       RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint);
@@ -1493,7 +1501,7 @@ public final class ProtobufUtil {
    *          two adjacent regions
    * @throws IOException
    */
-  public static void mergeRegions(final AdminProtocol admin,
+  public static void mergeRegions(final AdminService.BlockingInterface admin,
       final HRegionInfo region_a, final HRegionInfo region_b,
       final boolean forcible) throws IOException {
     MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java?rev=1478637&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java Fri May  3 03:52:15 2013
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Maps RPC protocol interfaces to required configuration
+ */
+public class SecurityInfo {
+  /** Maps RPC service names to authentication information */
+  private static ConcurrentMap<String,SecurityInfo> infos = new ConcurrentHashMap<String,SecurityInfo>();
+  // populate info for known services
+  static {
+    infos.put(AdminProtos.AdminService.getDescriptor().getName(),
+        new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(ClientProtos.ClientService.getDescriptor().getName(),
+        new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(MasterAdminProtos.MasterAdminService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(MasterMonitorProtos.MasterMonitorService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+    infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(),
+        new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
+  }
+
+  /**
+   * Adds a security configuration for a new service name.  Note that this will have no effect if
+   * the service name was already registered.
+   */
+  public static void addInfo(String serviceName, SecurityInfo securityInfo) {
+    infos.putIfAbsent(serviceName, securityInfo);
+  }
+
+  /**
+   * Returns the security configuration associated with the given service name.
+   */
+  public static SecurityInfo getInfo(String serviceName) {
+    return infos.get(serviceName);
+  }
+
+  private final String serverPrincipal;
+  private final Kind tokenKind;
+
+  public SecurityInfo(String serverPrincipal, Kind tokenKind) {
+    this.serverPrincipal = serverPrincipal;
+    this.tokenKind = tokenKind;
+  }
+
+  public String getServerPrincipal() {
+    return serverPrincipal;
+  }
+
+  public Kind getTokenKind() {
+    return tokenKind;
+  }
+}

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Fri May  3 03:52:15 2013
@@ -116,7 +116,8 @@ public class RecoverableZooKeeper {
       // the identifier = processID@hostName
       identifier = ManagementFactory.getRuntimeMXBean().getName();
     }
-    LOG.info("The identifier of this process is " + identifier);
+    LOG.info("Process identifier=" + identifier +
+      " connecting to ZooKeeper ensemble=" + quorumServers);
     this.identifier = identifier;
     this.id = Bytes.toBytes(identifier);
 

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java Fri May  3 03:52:15 2013
@@ -56,8 +56,7 @@ public class ZKConfig {
    * @return Properties holding mappings representing ZooKeeper config file.
    */
   public static Properties makeZKProps(Configuration conf) {
-    if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG,
-        false)) {
+    if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, false)) {
       LOG.warn(
           "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME +
           " file for ZK properties " +
@@ -80,12 +79,9 @@ public class ZKConfig {
         }
       }
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Skipped reading ZK properties file '" +
-            HConstants.ZOOKEEPER_CONFIG_NAME +
-            "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG +
-            "' was not set to true");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Skipped reading ZK properties file '" + HConstants.ZOOKEEPER_CONFIG_NAME +
+          "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + "' was not set to true");
       }
     }
 

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Fri May  3 03:52:15 2013
@@ -116,8 +116,9 @@ public class ZKUtil {
     }
     int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT,
         HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
-    LOG.debug(identifier + " opening connection to ZooKeeper with ensemble (" +
-        ensemble + ")");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble);
+    }
     int retry = conf.getInt("zookeeper.recovery.retry", 3);
     int retryIntervalMillis =
       conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
@@ -419,9 +420,9 @@ public class ZKUtil {
       Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw);
       boolean exists = s != null ? true : false;
       if (exists) {
-        LOG.debug(zkw.prefix("Set watcher on existing znode " + znode));
+        LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode));
       } else {
-        LOG.debug(zkw.prefix(znode+" does not exist. Watcher is set."));
+        LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode));
       }
       return exists;
     } catch (KeeperException e) {

Modified: hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java (original)
+++ hbase/trunk/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java Fri May  3 03:52:15 2013
@@ -78,7 +78,7 @@ public class TestSnapshotFromAdmin {
     // mock the master admin to our mock
     MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class);
     Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
-    Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(mockMaster);
+    Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(mockMaster);
     // set the max wait time for the snapshot to complete
     TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder()
         .setExpectedTimeout(maxWaitTime)
@@ -135,7 +135,7 @@ public class TestSnapshotFromAdmin {
 
     // mock the master connection
     MasterAdminKeepAliveConnection master = Mockito.mock(MasterAdminKeepAliveConnection.class);
-    Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(master);
+    Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(master);
     TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder().setExpectedTimeout(0).build();
     Mockito.when(
       master.snapshot((RpcController) Mockito.isNull(), Mockito.any(TakeSnapshotRequest.class)))

Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Fri May  3 03:52:15 2013
@@ -573,17 +573,6 @@ public final class HConstants {
   public static int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
 
   /**
-   * Parameter name for maximum attempts, used to limit the number of times the
-   * client will try to obtain the proxy for a given region server.
-   */
-  public static String HBASE_CLIENT_RPC_MAXATTEMPTS = "hbase.client.rpc.maxattempts";
-
-  /**
-   * Default value of {@link #HBASE_CLIENT_RPC_MAXATTEMPTS}.
-   */
-  public static int DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS = 1;
-
-  /**
    * Parameter name for client prefetch limit, used as the maximum number of regions
    * info that will be prefetched.
    */

Modified: hbase/trunk/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java (original)
+++ hbase/trunk/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java Fri May  3 03:52:15 2013
@@ -22,15 +22,14 @@ import java.io.File;
 import java.io.FileFilter;
 import java.io.FileInputStream;
 import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.jar.*;
+import java.util.jar.JarEntry;
+import java.util.jar.JarInputStream;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java Fri May  3 03:52:15 2013
@@ -23,15 +23,17 @@ import java.util.HashMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterManager.ServiceType;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
 import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 
@@ -86,12 +88,14 @@ public class DistributedHBaseCluster ext
   }
 
   @Override
-  public AdminProtocol getAdminProtocol(ServerName serverName) throws IOException {
+  public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
+  throws IOException {
     return admin.getConnection().getAdmin(serverName);
   }
 
   @Override
-  public ClientProtocol getClientProtocol(ServerName serverName) throws IOException {
+  public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName)
+  throws IOException {
     return admin.getConnection().getClient(serverName);
   }
 
@@ -133,13 +137,15 @@ public class DistributedHBaseCluster ext
   }
 
   @Override
-  public MasterAdminProtocol getMasterAdmin() throws IOException {
+  public MasterAdminProtos.MasterAdminService.BlockingInterface getMasterAdmin()
+  throws IOException {
     HConnection conn = HConnectionManager.getConnection(conf);
     return conn.getMasterAdmin();
   }
 
   @Override
-  public MasterMonitorProtocol getMasterMonitor() throws IOException {
+  public MasterMonitorProtos.MasterMonitorService.BlockingInterface getMasterMonitor()
+  throws IOException {
     HConnection conn = HConnectionManager.getConnection(conf);
     return conn.getMasterMonitor();
   }
@@ -195,7 +201,8 @@ public class DistributedHBaseCluster ext
       return null;
     }
 
-    AdminProtocol client = connection.getAdmin(regionLoc.getServerName());
+    AdminProtos.AdminService.BlockingInterface client =
+      connection.getAdmin(regionLoc.getServerName());
     ServerInfo info = ProtobufUtil.getServerInfo(client);
     return ProtobufUtil.toServerName(info.getServerName());
   }

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java Fri May  3 03:52:15 2013
@@ -103,7 +103,7 @@ public class IntegrationTestRebalanceAnd
   @SuppressWarnings("unchecked")
   public void setUp() throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    conf.set(HConnectionManager.RETRIES_BY_SERVER, "true");
+    conf.set(HConnectionManager.RETRIES_BY_SERVER_KEY, "true");
     super.setUp(NUM_SLAVES_BASE, conf);
 
     ChaosMonkey.Policy chaosPolicy = new ChaosMonkey.PeriodicRandomActionPolicy(

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java Fri May  3 03:52:15 2013
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.Set;
 import java.util.regex.Pattern;
 
@@ -113,5 +112,4 @@ public class IntegrationTestsDriver exte
 
     return result.wasSuccessful() ? 0 : 1;
   }
-
 }

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java Fri May  3 03:52:15 2013
@@ -20646,6 +20646,11 @@ public final class MasterAdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse> done);
       
+      public abstract void isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done);
+      
     }
     
     public static com.google.protobuf.Service newReflectiveService(
@@ -20867,6 +20872,14 @@ public final class MasterAdminProtos {
           impl.isRestoreSnapshotDone(controller, request, done);
         }
         
+        @java.lang.Override
+        public  void isMasterRunning(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) {
+          impl.isMasterRunning(controller, request, done);
+        }
+        
       };
     }
     
@@ -20943,6 +20956,8 @@ public final class MasterAdminProtos {
               return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest)request);
             case 26:
               return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest)request);
+            case 27:
+              return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -21011,6 +21026,8 @@ public final class MasterAdminProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance();
             case 26:
               return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
+            case 27:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -21079,6 +21096,8 @@ public final class MasterAdminProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance();
             case 26:
               return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
+            case 27:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -21222,6 +21241,11 @@ public final class MasterAdminProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse> done);
     
+    public abstract void isMasterRunning(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done);
+    
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -21379,6 +21403,11 @@ public final class MasterAdminProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse>specializeCallback(
               done));
           return;
+        case 27:
+          this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -21447,6 +21476,8 @@ public final class MasterAdminProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance();
         case 26:
           return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
+        case 27:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -21515,6 +21546,8 @@ public final class MasterAdminProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance();
         case 26:
           return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
+        case 27:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -21940,6 +21973,21 @@ public final class MasterAdminProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()));
       }
+      
+      public  void isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(27),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()));
+      }
     }
     
     public static BlockingInterface newBlockingStub(
@@ -22082,6 +22130,11 @@ public final class MasterAdminProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request)
           throws com.google.protobuf.ServiceException;
+      
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request)
+          throws com.google.protobuf.ServiceException;
     }
     
     private static final class BlockingStub implements BlockingInterface {
@@ -22414,6 +22467,18 @@ public final class MasterAdminProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance());
       }
       
+      
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(27),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance());
+      }
+      
     }
   }
   
@@ -22686,114 +22751,117 @@ public final class MasterAdminProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\021MasterAdmin.proto\032\013hbase.proto\032\014Client" +
-      ".proto\"R\n\020AddColumnRequest\022\021\n\ttableName\030" +
-      "\001 \002(\014\022+\n\016columnFamilies\030\002 \002(\0132\023.ColumnFa" +
-      "milySchema\"\023\n\021AddColumnResponse\"<\n\023Delet" +
-      "eColumnRequest\022\021\n\ttableName\030\001 \002(\014\022\022\n\ncol" +
-      "umnName\030\002 \002(\014\"\026\n\024DeleteColumnResponse\"U\n" +
-      "\023ModifyColumnRequest\022\021\n\ttableName\030\001 \002(\014\022" +
-      "+\n\016columnFamilies\030\002 \002(\0132\023.ColumnFamilySc" +
-      "hema\"\026\n\024ModifyColumnResponse\"Z\n\021MoveRegi" +
-      "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif",
-      "ier\022#\n\016destServerName\030\002 \001(\0132\013.ServerName" +
-      "\"\024\n\022MoveRegionResponse\"~\n\035DispatchMergin" +
-      "gRegionsRequest\022!\n\007regionA\030\001 \002(\0132\020.Regio" +
-      "nSpecifier\022!\n\007regionB\030\002 \002(\0132\020.RegionSpec" +
-      "ifier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispat" +
-      "chMergingRegionsResponse\"7\n\023AssignRegion" +
-      "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
-      "r\"\026\n\024AssignRegionResponse\"O\n\025UnassignReg" +
+      "\n\021MasterAdmin.proto\032\014Master.proto\032\013hbase" +
+      ".proto\032\014Client.proto\"R\n\020AddColumnRequest" +
+      "\022\021\n\ttableName\030\001 \002(\014\022+\n\016columnFamilies\030\002 " +
+      "\002(\0132\023.ColumnFamilySchema\"\023\n\021AddColumnRes" +
+      "ponse\"<\n\023DeleteColumnRequest\022\021\n\ttableNam" +
+      "e\030\001 \002(\014\022\022\n\ncolumnName\030\002 \002(\014\"\026\n\024DeleteCol" +
+      "umnResponse\"U\n\023ModifyColumnRequest\022\021\n\tta" +
+      "bleName\030\001 \002(\014\022+\n\016columnFamilies\030\002 \002(\0132\023." +
+      "ColumnFamilySchema\"\026\n\024ModifyColumnRespon" +
+      "se\"Z\n\021MoveRegionRequest\022 \n\006region\030\001 \002(\0132",
+      "\020.RegionSpecifier\022#\n\016destServerName\030\002 \001(" +
+      "\0132\013.ServerName\"\024\n\022MoveRegionResponse\"~\n\035" +
+      "DispatchMergingRegionsRequest\022!\n\007regionA" +
+      "\030\001 \002(\0132\020.RegionSpecifier\022!\n\007regionB\030\002 \002(" +
+      "\0132\020.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" +
+      "alse\" \n\036DispatchMergingRegionsResponse\"7" +
+      "\n\023AssignRegionRequest\022 \n\006region\030\001 \002(\0132\020." +
+      "RegionSpecifier\"\026\n\024AssignRegionResponse\"" +
+      "O\n\025UnassignRegionRequest\022 \n\006region\030\001 \002(\013" +
+      "2\020.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005false",
+      "\"\030\n\026UnassignRegionResponse\"8\n\024OfflineReg" +
       "ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
-      "fier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRe",
-      "gionResponse\"8\n\024OfflineRegionRequest\022 \n\006" +
-      "region\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025Offlin" +
-      "eRegionResponse\"J\n\022CreateTableRequest\022!\n" +
-      "\013tableSchema\030\001 \002(\0132\014.TableSchema\022\021\n\tspli" +
-      "tKeys\030\002 \003(\014\"\025\n\023CreateTableResponse\"\'\n\022De" +
-      "leteTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023D" +
-      "eleteTableResponse\"\'\n\022EnableTableRequest" +
-      "\022\021\n\ttableName\030\001 \002(\014\"\025\n\023EnableTableRespon" +
-      "se\"(\n\023DisableTableRequest\022\021\n\ttableName\030\001" +
-      " \002(\014\"\026\n\024DisableTableResponse\"J\n\022ModifyTa",
-      "bleRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013tableSc" +
-      "hema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableR" +
-      "esponse\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRe" +
-      "sponse\"\023\n\021StopMasterRequest\"\024\n\022StopMaste" +
-      "rResponse\"\020\n\016BalanceRequest\"&\n\017BalanceRe" +
-      "sponse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalanc" +
-      "erRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchron" +
-      "ous\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022" +
-      "\030\n\020prevBalanceValue\030\001 \001(\010\"\024\n\022CatalogScan" +
-      "Request\")\n\023CatalogScanResponse\022\022\n\nscanRe",
-      "sult\030\001 \001(\005\"-\n\033EnableCatalogJanitorReques" +
-      "t\022\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalogJanito" +
-      "rResponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036IsCatalo" +
-      "gJanitorEnabledRequest\"0\n\037IsCatalogJanit" +
-      "orEnabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023Take" +
-      "SnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snap" +
-      "shotDescription\"/\n\024TakeSnapshotResponse\022" +
-      "\027\n\017expectedTimeout\030\001 \002(\003\"\025\n\023ListSnapshot" +
-      "Request\"?\n\024ListSnapshotResponse\022\'\n\tsnaps" +
-      "hots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Dele",
-      "teSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Sn" +
-      "apshotDescription\"\030\n\026DeleteSnapshotRespo" +
-      "nse\"@\n\026RestoreSnapshotRequest\022&\n\010snapsho" +
-      "t\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Restore" +
-      "SnapshotResponse\"?\n\025IsSnapshotDoneReques" +
-      "t\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescriptio" +
-      "n\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(" +
-      "\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDes" +
-      "cription\"F\n\034IsRestoreSnapshotDoneRequest" +
-      "\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription",
-      "\"3\n\035IsRestoreSnapshotDoneResponse\022\022\n\004don" +
-      "e\030\001 \001(\010:\004true2\377\r\n\022MasterAdminService\0222\n\t" +
-      "addColumn\022\021.AddColumnRequest\032\022.AddColumn" +
-      "Response\022;\n\014deleteColumn\022\024.DeleteColumnR" +
-      "equest\032\025.DeleteColumnResponse\022;\n\014modifyC" +
-      "olumn\022\024.ModifyColumnRequest\032\025.ModifyColu" +
-      "mnResponse\0225\n\nmoveRegion\022\022.MoveRegionReq" +
-      "uest\032\023.MoveRegionResponse\022Y\n\026dispatchMer" +
-      "gingRegions\022\036.DispatchMergingRegionsRequ" +
-      "est\032\037.DispatchMergingRegionsResponse\022;\n\014",
-      "assignRegion\022\024.AssignRegionRequest\032\025.Ass" +
-      "ignRegionResponse\022A\n\016unassignRegion\022\026.Un" +
-      "assignRegionRequest\032\027.UnassignRegionResp" +
-      "onse\022>\n\rofflineRegion\022\025.OfflineRegionReq" +
-      "uest\032\026.OfflineRegionResponse\0228\n\013deleteTa" +
-      "ble\022\023.DeleteTableRequest\032\024.DeleteTableRe" +
-      "sponse\0228\n\013enableTable\022\023.EnableTableReque" +
-      "st\032\024.EnableTableResponse\022;\n\014disableTable" +
-      "\022\024.DisableTableRequest\032\025.DisableTableRes" +
-      "ponse\0228\n\013modifyTable\022\023.ModifyTableReques",
-      "t\032\024.ModifyTableResponse\0228\n\013createTable\022\023" +
-      ".CreateTableRequest\032\024.CreateTableRespons" +
-      "e\022/\n\010shutdown\022\020.ShutdownRequest\032\021.Shutdo" +
-      "wnResponse\0225\n\nstopMaster\022\022.StopMasterReq" +
-      "uest\032\023.StopMasterResponse\022,\n\007balance\022\017.B" +
-      "alanceRequest\032\020.BalanceResponse\022M\n\022setBa" +
-      "lancerRunning\022\032.SetBalancerRunningReques" +
-      "t\032\033.SetBalancerRunningResponse\022;\n\016runCat" +
-      "alogScan\022\023.CatalogScanRequest\032\024.CatalogS" +
-      "canResponse\022S\n\024enableCatalogJanitor\022\034.En",
-      "ableCatalogJanitorRequest\032\035.EnableCatalo" +
-      "gJanitorResponse\022\\\n\027isCatalogJanitorEnab" +
-      "led\022\037.IsCatalogJanitorEnabledRequest\032 .I" +
-      "sCatalogJanitorEnabledResponse\022L\n\021execMa" +
-      "sterService\022\032.CoprocessorServiceRequest\032" +
-      "\033.CoprocessorServiceResponse\0227\n\010snapshot" +
-      "\022\024.TakeSnapshotRequest\032\025.TakeSnapshotRes" +
-      "ponse\022D\n\025getCompletedSnapshots\022\024.ListSna" +
-      "pshotRequest\032\025.ListSnapshotResponse\022A\n\016d" +
-      "eleteSnapshot\022\026.DeleteSnapshotRequest\032\027.",
-      "DeleteSnapshotResponse\022A\n\016isSnapshotDone" +
-      "\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotDon" +
-      "eResponse\022D\n\017restoreSnapshot\022\027.RestoreSn" +
-      "apshotRequest\032\030.RestoreSnapshotResponse\022" +
-      "V\n\025isRestoreSnapshotDone\022\035.IsRestoreSnap" +
-      "shotDoneRequest\032\036.IsRestoreSnapshotDoneR" +
-      "esponseBG\n*org.apache.hadoop.hbase.proto" +
-      "buf.generatedB\021MasterAdminProtosH\001\210\001\001\240\001\001"
+      "fier\"\027\n\025OfflineRegionResponse\"J\n\022CreateT" +
+      "ableRequest\022!\n\013tableSchema\030\001 \002(\0132\014.Table" +
+      "Schema\022\021\n\tsplitKeys\030\002 \003(\014\"\025\n\023CreateTable" +
+      "Response\"\'\n\022DeleteTableRequest\022\021\n\ttableN" +
+      "ame\030\001 \002(\014\"\025\n\023DeleteTableResponse\"\'\n\022Enab" +
+      "leTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023Ena" +
+      "bleTableResponse\"(\n\023DisableTableRequest\022" +
+      "\021\n\ttableName\030\001 \002(\014\"\026\n\024DisableTableRespon",
+      "se\"J\n\022ModifyTableRequest\022\021\n\ttableName\030\001 " +
+      "\002(\014\022!\n\013tableSchema\030\002 \002(\0132\014.TableSchema\"\025" +
+      "\n\023ModifyTableResponse\"\021\n\017ShutdownRequest" +
+      "\"\022\n\020ShutdownResponse\"\023\n\021StopMasterReques" +
+      "t\"\024\n\022StopMasterResponse\"\020\n\016BalanceReques" +
+      "t\"&\n\017BalanceResponse\022\023\n\013balancerRan\030\001 \002(" +
+      "\010\"<\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002" +
+      "(\010\022\023\n\013synchronous\030\002 \001(\010\"6\n\032SetBalancerRu" +
+      "nningResponse\022\030\n\020prevBalanceValue\030\001 \001(\010\"" +
+      "\024\n\022CatalogScanRequest\")\n\023CatalogScanResp",
+      "onse\022\022\n\nscanResult\030\001 \001(\005\"-\n\033EnableCatalo" +
+      "gJanitorRequest\022\016\n\006enable\030\001 \002(\010\"1\n\034Enabl" +
+      "eCatalogJanitorResponse\022\021\n\tprevValue\030\001 \001" +
+      "(\010\" \n\036IsCatalogJanitorEnabledRequest\"0\n\037" +
+      "IsCatalogJanitorEnabledResponse\022\r\n\005value" +
+      "\030\001 \002(\010\"=\n\023TakeSnapshotRequest\022&\n\010snapsho" +
+      "t\030\001 \002(\0132\024.SnapshotDescription\"/\n\024TakeSna" +
+      "pshotResponse\022\027\n\017expectedTimeout\030\001 \002(\003\"\025" +
+      "\n\023ListSnapshotRequest\"?\n\024ListSnapshotRes" +
+      "ponse\022\'\n\tsnapshots\030\001 \003(\0132\024.SnapshotDescr",
+      "iption\"?\n\025DeleteSnapshotRequest\022&\n\010snaps" +
+      "hot\030\001 \002(\0132\024.SnapshotDescription\"\030\n\026Delet" +
+      "eSnapshotResponse\"@\n\026RestoreSnapshotRequ" +
+      "est\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescript" +
+      "ion\"\031\n\027RestoreSnapshotResponse\"?\n\025IsSnap" +
+      "shotDoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snap" +
+      "shotDescription\"U\n\026IsSnapshotDoneRespons" +
+      "e\022\023\n\004done\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\013" +
+      "2\024.SnapshotDescription\"F\n\034IsRestoreSnaps" +
+      "hotDoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snaps",
+      "hotDescription\"3\n\035IsRestoreSnapshotDoneR" +
+      "esponse\022\022\n\004done\030\001 \001(\010:\004true2\305\016\n\022MasterAd" +
+      "minService\0222\n\taddColumn\022\021.AddColumnReque" +
+      "st\032\022.AddColumnResponse\022;\n\014deleteColumn\022\024" +
+      ".DeleteColumnRequest\032\025.DeleteColumnRespo" +
+      "nse\022;\n\014modifyColumn\022\024.ModifyColumnReques" +
+      "t\032\025.ModifyColumnResponse\0225\n\nmoveRegion\022\022" +
+      ".MoveRegionRequest\032\023.MoveRegionResponse\022" +
+      "Y\n\026dispatchMergingRegions\022\036.DispatchMerg" +
+      "ingRegionsRequest\032\037.DispatchMergingRegio",
+      "nsResponse\022;\n\014assignRegion\022\024.AssignRegio" +
+      "nRequest\032\025.AssignRegionResponse\022A\n\016unass" +
+      "ignRegion\022\026.UnassignRegionRequest\032\027.Unas" +
+      "signRegionResponse\022>\n\rofflineRegion\022\025.Of" +
+      "flineRegionRequest\032\026.OfflineRegionRespon" +
+      "se\0228\n\013deleteTable\022\023.DeleteTableRequest\032\024" +
+      ".DeleteTableResponse\0228\n\013enableTable\022\023.En" +
+      "ableTableRequest\032\024.EnableTableResponse\022;" +
+      "\n\014disableTable\022\024.DisableTableRequest\032\025.D" +
+      "isableTableResponse\0228\n\013modifyTable\022\023.Mod",
+      "ifyTableRequest\032\024.ModifyTableResponse\0228\n" +
+      "\013createTable\022\023.CreateTableRequest\032\024.Crea" +
+      "teTableResponse\022/\n\010shutdown\022\020.ShutdownRe" +
+      "quest\032\021.ShutdownResponse\0225\n\nstopMaster\022\022" +
+      ".StopMasterRequest\032\023.StopMasterResponse\022" +
+      ",\n\007balance\022\017.BalanceRequest\032\020.BalanceRes" +
+      "ponse\022M\n\022setBalancerRunning\022\032.SetBalance" +
+      "rRunningRequest\032\033.SetBalancerRunningResp" +
+      "onse\022;\n\016runCatalogScan\022\023.CatalogScanRequ" +
+      "est\032\024.CatalogScanResponse\022S\n\024enableCatal",
+      "ogJanitor\022\034.EnableCatalogJanitorRequest\032" +
+      "\035.EnableCatalogJanitorResponse\022\\\n\027isCata" +
+      "logJanitorEnabled\022\037.IsCatalogJanitorEnab" +
+      "ledRequest\032 .IsCatalogJanitorEnabledResp" +
+      "onse\022L\n\021execMasterService\022\032.CoprocessorS" +
+      "erviceRequest\032\033.CoprocessorServiceRespon" +
+      "se\0227\n\010snapshot\022\024.TakeSnapshotRequest\032\025.T" +
+      "akeSnapshotResponse\022D\n\025getCompletedSnaps" +
+      "hots\022\024.ListSnapshotRequest\032\025.ListSnapsho" +
+      "tResponse\022A\n\016deleteSnapshot\022\026.DeleteSnap",
+      "shotRequest\032\027.DeleteSnapshotResponse\022A\n\016" +
+      "isSnapshotDone\022\026.IsSnapshotDoneRequest\032\027" +
+      ".IsSnapshotDoneResponse\022D\n\017restoreSnapsh" +
+      "ot\022\027.RestoreSnapshotRequest\032\030.RestoreSna" +
+      "pshotResponse\022V\n\025isRestoreSnapshotDone\022\035" +
+      ".IsRestoreSnapshotDoneRequest\032\036.IsRestor" +
+      "eSnapshotDoneResponse\022D\n\017isMasterRunning" +
+      "\022\027.IsMasterRunningRequest\032\030.IsMasterRunn" +
+      "ingResponseBG\n*org.apache.hadoop.hbase.p" +
+      "rotobuf.generatedB\021MasterAdminProtosH\001\210\001",
+      "\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -23222,6 +23290,7 @@ public final class MasterAdminProtos {
     com.google.protobuf.Descriptors.FileDescriptor
       .internalBuildGeneratedFileFrom(descriptorData,
         new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
         }, assigner);

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java Fri May  3 03:52:15 2013
@@ -2632,6 +2632,11 @@ public final class MasterMonitorProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse> done);
       
+      public abstract void isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done);
+      
     }
     
     public static com.google.protobuf.Service newReflectiveService(
@@ -2661,6 +2666,14 @@ public final class MasterMonitorProtos {
           impl.getClusterStatus(controller, request, done);
         }
         
+        @java.lang.Override
+        public  void isMasterRunning(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) {
+          impl.isMasterRunning(controller, request, done);
+        }
+        
       };
     }
     
@@ -2689,6 +2702,8 @@ public final class MasterMonitorProtos {
               return impl.getTableDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest)request);
             case 2:
               return impl.getClusterStatus(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest)request);
+            case 3:
+              return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -2709,6 +2724,8 @@ public final class MasterMonitorProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -2729,6 +2746,8 @@ public final class MasterMonitorProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance();
             case 2:
               return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -2752,6 +2771,11 @@ public final class MasterMonitorProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse> done);
     
+    public abstract void isMasterRunning(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done);
+    
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -2789,6 +2813,11 @@ public final class MasterMonitorProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse>specializeCallback(
               done));
           return;
+        case 3:
+          this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -2809,6 +2838,8 @@ public final class MasterMonitorProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -2829,6 +2860,8 @@ public final class MasterMonitorProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance();
         case 2:
           return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -2894,6 +2927,21 @@ public final class MasterMonitorProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance()));
       }
+      
+      public  void isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class,
+            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()));
+      }
     }
     
     public static BlockingInterface newBlockingStub(
@@ -2916,6 +2964,11 @@ public final class MasterMonitorProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request)
           throws com.google.protobuf.ServiceException;
+      
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request)
+          throws com.google.protobuf.ServiceException;
     }
     
     private static final class BlockingStub implements BlockingInterface {
@@ -2960,6 +3013,18 @@ public final class MasterMonitorProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance());
       }
       
+      
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance());
+      }
+      
     }
   }
   
@@ -3002,25 +3067,27 @@ public final class MasterMonitorProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\023MasterMonitor.proto\032\013hbase.proto\032\023Clus" +
-      "terStatus.proto\"0\n\033GetSchemaAlterStatusR" +
-      "equest\022\021\n\ttableName\030\001 \002(\014\"P\n\034GetSchemaAl" +
-      "terStatusResponse\022\032\n\022yetToUpdateRegions\030" +
-      "\001 \001(\r\022\024\n\014totalRegions\030\002 \001(\r\"0\n\032GetTableD" +
-      "escriptorsRequest\022\022\n\ntableNames\030\001 \003(\t\"@\n" +
-      "\033GetTableDescriptorsResponse\022!\n\013tableSch" +
-      "ema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClusterSta" +
-      "tusRequest\"A\n\030GetClusterStatusResponse\022%" +
-      "\n\rclusterStatus\030\001 \002(\0132\016.ClusterStatus2\206\002",
-      "\n\024MasterMonitorService\022S\n\024getSchemaAlter" +
-      "Status\022\034.GetSchemaAlterStatusRequest\032\035.G" +
-      "etSchemaAlterStatusResponse\022P\n\023getTableD" +
-      "escriptors\022\033.GetTableDescriptorsRequest\032" +
-      "\034.GetTableDescriptorsResponse\022G\n\020getClus" +
-      "terStatus\022\030.GetClusterStatusRequest\032\031.Ge" +
-      "tClusterStatusResponseBI\n*org.apache.had" +
-      "oop.hbase.protobuf.generatedB\023MasterMoni" +
-      "torProtosH\001\210\001\001\240\001\001"
+      "\n\023MasterMonitor.proto\032\014Master.proto\032\013hba" +
+      "se.proto\032\023ClusterStatus.proto\"0\n\033GetSche" +
+      "maAlterStatusRequest\022\021\n\ttableName\030\001 \002(\014\"" +
+      "P\n\034GetSchemaAlterStatusResponse\022\032\n\022yetTo" +
+      "UpdateRegions\030\001 \001(\r\022\024\n\014totalRegions\030\002 \001(" +
+      "\r\"0\n\032GetTableDescriptorsRequest\022\022\n\ntable" +
+      "Names\030\001 \003(\t\"@\n\033GetTableDescriptorsRespon" +
+      "se\022!\n\013tableSchema\030\001 \003(\0132\014.TableSchema\"\031\n" +
+      "\027GetClusterStatusRequest\"A\n\030GetClusterSt" +
+      "atusResponse\022%\n\rclusterStatus\030\001 \002(\0132\016.Cl",
+      "usterStatus2\314\002\n\024MasterMonitorService\022S\n\024" +
+      "getSchemaAlterStatus\022\034.GetSchemaAlterSta" +
+      "tusRequest\032\035.GetSchemaAlterStatusRespons" +
+      "e\022P\n\023getTableDescriptors\022\033.GetTableDescr" +
+      "iptorsRequest\032\034.GetTableDescriptorsRespo" +
+      "nse\022G\n\020getClusterStatus\022\030.GetClusterStat" +
+      "usRequest\032\031.GetClusterStatusResponse\022D\n\017" +
+      "isMasterRunning\022\027.IsMasterRunningRequest" +
+      "\032\030.IsMasterRunningResponseBI\n*org.apache" +
+      ".hadoop.hbase.protobuf.generatedB\023Master",
+      "MonitorProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3081,6 +3148,7 @@ public final class MasterMonitorProtos {
     com.google.protobuf.Descriptors.FileDescriptor
       .internalBuildGeneratedFileFrom(descriptorData,
         new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
         }, assigner);

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java Fri May  3 03:52:15 2013
@@ -535,9 +535,9 @@ public final class RPCProtos {
     org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
     org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
     
-    // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"];
-    boolean hasProtocol();
-    String getProtocol();
+    // optional string serviceName = 2;
+    boolean hasServiceName();
+    String getServiceName();
     
     // optional string cellBlockCodecClass = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"];
     boolean hasCellBlockCodecClass();
@@ -589,14 +589,14 @@ public final class RPCProtos {
       return userInfo_;
     }
     
-    // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"];
-    public static final int PROTOCOL_FIELD_NUMBER = 2;
-    private java.lang.Object protocol_;
-    public boolean hasProtocol() {
+    // optional string serviceName = 2;
+    public static final int SERVICENAME_FIELD_NUMBER = 2;
+    private java.lang.Object serviceName_;
+    public boolean hasServiceName() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
-    public String getProtocol() {
-      java.lang.Object ref = protocol_;
+    public String getServiceName() {
+      java.lang.Object ref = serviceName_;
       if (ref instanceof String) {
         return (String) ref;
       } else {
@@ -604,17 +604,17 @@ public final class RPCProtos {
             (com.google.protobuf.ByteString) ref;
         String s = bs.toStringUtf8();
         if (com.google.protobuf.Internal.isValidUtf8(bs)) {
-          protocol_ = s;
+          serviceName_ = s;
         }
         return s;
       }
     }
-    private com.google.protobuf.ByteString getProtocolBytes() {
-      java.lang.Object ref = protocol_;
+    private com.google.protobuf.ByteString getServiceNameBytes() {
+      java.lang.Object ref = serviceName_;
       if (ref instanceof String) {
         com.google.protobuf.ByteString b = 
             com.google.protobuf.ByteString.copyFromUtf8((String) ref);
-        protocol_ = b;
+        serviceName_ = b;
         return b;
       } else {
         return (com.google.protobuf.ByteString) ref;
@@ -687,7 +687,7 @@ public final class RPCProtos {
     
     private void initFields() {
       userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
-      protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol";
+      serviceName_ = "";
       cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec";
       cellBlockCompressorClass_ = "";
     }
@@ -713,7 +713,7 @@ public final class RPCProtos {
         output.writeMessage(1, userInfo_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeBytes(2, getProtocolBytes());
+        output.writeBytes(2, getServiceNameBytes());
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeBytes(3, getCellBlockCodecClassBytes());
@@ -736,7 +736,7 @@ public final class RPCProtos {
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(2, getProtocolBytes());
+          .computeBytesSize(2, getServiceNameBytes());
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
@@ -774,10 +774,10 @@ public final class RPCProtos {
         result = result && getUserInfo()
             .equals(other.getUserInfo());
       }
-      result = result && (hasProtocol() == other.hasProtocol());
-      if (hasProtocol()) {
-        result = result && getProtocol()
-            .equals(other.getProtocol());
+      result = result && (hasServiceName() == other.hasServiceName());
+      if (hasServiceName()) {
+        result = result && getServiceName()
+            .equals(other.getServiceName());
       }
       result = result && (hasCellBlockCodecClass() == other.hasCellBlockCodecClass());
       if (hasCellBlockCodecClass()) {
@@ -802,9 +802,9 @@ public final class RPCProtos {
         hash = (37 * hash) + USERINFO_FIELD_NUMBER;
         hash = (53 * hash) + getUserInfo().hashCode();
       }
-      if (hasProtocol()) {
-        hash = (37 * hash) + PROTOCOL_FIELD_NUMBER;
-        hash = (53 * hash) + getProtocol().hashCode();
+      if (hasServiceName()) {
+        hash = (37 * hash) + SERVICENAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServiceName().hashCode();
       }
       if (hasCellBlockCodecClass()) {
         hash = (37 * hash) + CELLBLOCKCODECCLASS_FIELD_NUMBER;
@@ -937,7 +937,7 @@ public final class RPCProtos {
           userInfoBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol";
+        serviceName_ = "";
         bitField0_ = (bitField0_ & ~0x00000002);
         cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec";
         bitField0_ = (bitField0_ & ~0x00000004);
@@ -992,7 +992,7 @@ public final class RPCProtos {
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.protocol_ = protocol_;
+        result.serviceName_ = serviceName_;
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
@@ -1020,8 +1020,8 @@ public final class RPCProtos {
         if (other.hasUserInfo()) {
           mergeUserInfo(other.getUserInfo());
         }
-        if (other.hasProtocol()) {
-          setProtocol(other.getProtocol());
+        if (other.hasServiceName()) {
+          setServiceName(other.getServiceName());
         }
         if (other.hasCellBlockCodecClass()) {
           setCellBlockCodecClass(other.getCellBlockCodecClass());
@@ -1077,7 +1077,7 @@ public final class RPCProtos {
             }
             case 18: {
               bitField0_ |= 0x00000002;
-              protocol_ = input.readBytes();
+              serviceName_ = input.readBytes();
               break;
             }
             case 26: {
@@ -1186,39 +1186,39 @@ public final class RPCProtos {
         return userInfoBuilder_;
       }
       
-      // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"];
-      private java.lang.Object protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol";
-      public boolean hasProtocol() {
+      // optional string serviceName = 2;
+      private java.lang.Object serviceName_ = "";
+      public boolean hasServiceName() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
-      public String getProtocol() {
-        java.lang.Object ref = protocol_;
+      public String getServiceName() {
+        java.lang.Object ref = serviceName_;
         if (!(ref instanceof String)) {
           String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
-          protocol_ = s;
+          serviceName_ = s;
           return s;
         } else {
           return (String) ref;
         }
       }
-      public Builder setProtocol(String value) {
+      public Builder setServiceName(String value) {
         if (value == null) {
     throw new NullPointerException();
   }
   bitField0_ |= 0x00000002;
-        protocol_ = value;
+        serviceName_ = value;
         onChanged();
         return this;
       }
-      public Builder clearProtocol() {
+      public Builder clearServiceName() {
         bitField0_ = (bitField0_ & ~0x00000002);
-        protocol_ = getDefaultInstance().getProtocol();
+        serviceName_ = getDefaultInstance().getServiceName();
         onChanged();
         return this;
       }
-      void setProtocol(com.google.protobuf.ByteString value) {
+      void setServiceName(com.google.protobuf.ByteString value) {
         bitField0_ |= 0x00000002;
-        protocol_ = value;
+        serviceName_ = value;
         onChanged();
       }
       
@@ -3982,25 +3982,23 @@ public final class RPCProtos {
     java.lang.String[] descriptorData = {
       "\n\tRPC.proto\032\rTracing.proto\032\013hbase.proto\"" +
       ":\n\017UserInformation\022\025\n\reffectiveUser\030\001 \002(" +
-      "\t\022\020\n\010realUser\030\002 \001(\t\"\343\001\n\020ConnectionHeader" +
-      "\022\"\n\010userInfo\030\001 \001(\0132\020.UserInformation\022?\n\010" +
-      "protocol\030\002 \001(\t:-org.apache.hadoop.hbase." +
-      "client.ClientProtocol\022H\n\023cellBlockCodecC" +
-      "lass\030\003 \001(\t:+org.apache.hadoop.hbase.code" +
-      "c.KeyValueCodec\022 \n\030cellBlockCompressorCl" +
-      "ass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001" +
-      "(\r\"w\n\021ExceptionResponse\022\032\n\022exceptionClas",
-      "sName\030\001 \001(\t\022\022\n\nstackTrace\030\002 \001(\t\022\020\n\010hostn" +
-      "ame\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\022\n\ndoNotRetry\030\005 " +
-      "\001(\010\"\216\001\n\rRequestHeader\022\016\n\006callId\030\001 \001(\r\022\034\n" +
-      "\ttraceInfo\030\002 \001(\0132\t.RPCTInfo\022\022\n\nmethodNam" +
-      "e\030\003 \001(\t\022\024\n\014requestParam\030\004 \001(\010\022%\n\rcellBlo" +
-      "ckMeta\030\005 \001(\0132\016.CellBlockMeta\"n\n\016Response" +
-      "Header\022\016\n\006callId\030\001 \001(\r\022%\n\texception\030\002 \001(" +
-      "\0132\022.ExceptionResponse\022%\n\rcellBlockMeta\030\003" +
-      " \001(\0132\016.CellBlockMetaB<\n*org.apache.hadoo" +
-      "p.hbase.protobuf.generatedB\tRPCProtosH\001\240",
-      "\001\001"
+      "\t\022\020\n\010realUser\030\002 \001(\t\"\267\001\n\020ConnectionHeader" +
+      "\022\"\n\010userInfo\030\001 \001(\0132\020.UserInformation\022\023\n\013" +
+      "serviceName\030\002 \001(\t\022H\n\023cellBlockCodecClass" +
+      "\030\003 \001(\t:+org.apache.hadoop.hbase.codec.Ke" +
+      "yValueCodec\022 \n\030cellBlockCompressorClass\030" +
+      "\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001(\r\"w" +
+      "\n\021ExceptionResponse\022\032\n\022exceptionClassNam" +
+      "e\030\001 \001(\t\022\022\n\nstackTrace\030\002 \001(\t\022\020\n\010hostname\030",
+      "\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\022\n\ndoNotRetry\030\005 \001(\010\"" +
+      "\216\001\n\rRequestHeader\022\016\n\006callId\030\001 \001(\r\022\034\n\ttra" +
+      "ceInfo\030\002 \001(\0132\t.RPCTInfo\022\022\n\nmethodName\030\003 " +
+      "\001(\t\022\024\n\014requestParam\030\004 \001(\010\022%\n\rcellBlockMe" +
+      "ta\030\005 \001(\0132\016.CellBlockMeta\"n\n\016ResponseHead" +
+      "er\022\016\n\006callId\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022." +
+      "ExceptionResponse\022%\n\rcellBlockMeta\030\003 \001(\013" +
+      "2\016.CellBlockMetaB<\n*org.apache.hadoop.hb" +
+      "ase.protobuf.generatedB\tRPCProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -4020,7 +4018,7 @@ public final class RPCProtos {
           internal_static_ConnectionHeader_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ConnectionHeader_descriptor,
-              new java.lang.String[] { "UserInfo", "Protocol", "CellBlockCodecClass", "CellBlockCompressorClass", },
+              new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", },
               org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.class,
               org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder.class);
           internal_static_CellBlockMeta_descriptor =

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto Fri May  3 03:52:15 2013
@@ -18,6 +18,8 @@
 
 // This file contains protocol buffers that are used for MasterAdminProtocol.
 
+import "Master.proto";
+
 option java_package = "org.apache.hadoop.hbase.protobuf.generated";
 option java_outer_classname = "MasterAdminProtos";
 option java_generic_services = true;
@@ -354,7 +356,7 @@ service MasterAdminService {
 
   /**
    * List completed snapshots.
-   * @return a list of snapshot descriptors for completed snapshots
+   * Returns a list of snapshot descriptors for completed snapshots
    */
   rpc getCompletedSnapshots(ListSnapshotRequest) returns(ListSnapshotResponse);
 
@@ -379,4 +381,7 @@ service MasterAdminService {
    * Determine if the snapshot restore is done yet.
    */
   rpc isRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
+
+  /** return true if master is available */
+  rpc isMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
 }

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto Fri May  3 03:52:15 2013
@@ -17,6 +17,7 @@
  */
 
 // This file contains protocol buffers that are used for MasterMonitorProtocol.
+import "Master.proto";
 
 option java_package = "org.apache.hadoop.hbase.protobuf.generated";
 option java_outer_classname = "MasterMonitorProtos";
@@ -63,4 +64,7 @@ service MasterMonitorService {
   /** Return cluster status. */
   rpc getClusterStatus(GetClusterStatusRequest)
     returns(GetClusterStatusResponse);
+
+  /** return true if master is available */
+  rpc isMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
 }

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/RPC.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/RPC.proto?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/RPC.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/RPC.proto Fri May  3 03:52:15 2013
@@ -79,7 +79,7 @@ message UserInformation {
 // This is sent on connection setup after the connection preamble is sent.
 message ConnectionHeader {
   optional UserInformation userInfo = 1;
-  optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"];
+  optional string serviceName = 2;
   // Cell block codec we will use sending over optional cell blocks.  Server throws exception
   // if cannot deal.
   optional string cellBlockCodecClass = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"];

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java Fri May  3 03:52:15 2013
@@ -53,10 +53,9 @@ public class ZNodeClearer {
    */
   public static void writeMyEphemeralNodeOnDisk(String fileContent) {
     String fileName = ZNodeClearer.getMyEphemeralNodeFileName();
-
     if (fileName == null) {
-      LOG.warn("No filename given to save the znode used, it won't be saved " +
-          "(Environment variable HBASE_ZNODE_FILE is not set).");
+      LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " +
+        "on crash by start scripts (Longer MTTR!)");
       return;
     }
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java Fri May  3 03:52:15 2013
@@ -196,8 +196,6 @@ public class HFileSystem extends FilterF
    * @return true if the interceptor was added, false otherwise.
    */
   static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
-    LOG.debug("Starting addLocationsOrderInterceptor with class " + lrb.getClass());
-
     if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) {  // activated by default
       LOG.debug("addLocationsOrderInterceptor configured to false");
       return false;
@@ -212,8 +210,8 @@ public class HFileSystem extends FilterF
     }
 
     if (!(fs instanceof DistributedFileSystem)) {
-      LOG.warn("The file system is not a DistributedFileSystem." +
-          "Not adding block location reordering");
+      LOG.debug("The file system is not a DistributedFileSystem. " +
+          "Skipping on block location reordering");
       return false;
     }
 
@@ -243,7 +241,8 @@ public class HFileSystem extends FilterF
 
       ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
       nf.set(dfsc, cp1);
-      LOG.info("Added intercepting call to namenode#getBlockLocations");
+      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
+        " using class " + lrb.getClass());
     } catch (NoSuchFieldException e) {
       LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
       return false;

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java Fri May  3 03:52:15 2013
@@ -347,8 +347,7 @@ public class CacheConfig {
    * @param conf  The current configuration.
    * @return The block cache or <code>null</code>.
    */
-  private static synchronized BlockCache instantiateBlockCache(
-      Configuration conf) {
+  private static synchronized BlockCache instantiateBlockCache(Configuration conf) {
     if (globalBlockCache != null) return globalBlockCache;
     if (blockCacheDisabled) return null;
 
@@ -366,14 +365,12 @@ public class CacheConfig {
     // Calculate the amount of heap to give the heap.
     MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
     long lruCacheSize = (long) (mu.getMax() * cachePercentage);
-    int blockSize = conf.getInt("hbase.offheapcache.minblocksize",
-        HConstants.DEFAULT_BLOCKSIZE);
+    int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
     long offHeapCacheSize =
       (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0) *
           DirectMemoryUtils.getDirectMemorySize());
     if (offHeapCacheSize <= 0) {
-      String bucketCacheIOEngineName = conf
-          .get(BUCKET_CACHE_IOENGINE_KEY, null);
+      String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
       float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
       // A percentage of max heap size or a absolute value with unit megabytes
       long bucketCacheSize = (long) (bucketCachePercentage < 1 ? mu.getMax()
@@ -407,10 +404,9 @@ public class CacheConfig {
           throw new RuntimeException(ioex);
         }
       }
-      LOG.info("Allocating LruBlockCache with maximum size "
-          + StringUtils.humanReadableInt(lruCacheSize));
-      LruBlockCache lruCache = new LruBlockCache(lruCacheSize,
-          StoreFile.DEFAULT_BLOCKSIZE_SMALL);
+      LOG.info("Allocating LruBlockCache with maximum size " +
+        StringUtils.humanReadableInt(lruCacheSize));
+      LruBlockCache lruCache = new LruBlockCache(lruCacheSize, StoreFile.DEFAULT_BLOCKSIZE_SMALL);
       lruCache.setVictimCache(bucketCache);
       if (bucketCache != null && combinedWithLru) {
         globalBlockCache = new CombinedBlockCache(lruCache, bucketCache);

Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java?rev=1478637&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java Fri May  3 03:52:15 2013
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+@SuppressWarnings("serial")
+public class EmptyServiceNameException extends FatalConnectionException {}
\ No newline at end of file

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java?rev=1478637&r1=1478636&r2=1478637&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java Fri May  3 03:52:15 2013
@@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.ipc;
 
 public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper {
 
-  private HBaseServer server;
+  private RpcServer server;
 
-  MetricsHBaseServerWrapperImpl(HBaseServer server) {
+  MetricsHBaseServerWrapperImpl(RpcServer server) {
     this.server = server;
   }
 



Mime
View raw message