hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jxi...@apache.org
Subject svn commit: r1581479 [3/9] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-it/src/test/java/org/apache/hadoop/hbase/ hbase-it/src/test/java/org/apache/hadoo...
Date Tue, 25 Mar 2014 19:34:55 GMT
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java Tue Mar 25 19:34:52 2014
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.ServerNam
 import org.apache.hadoop.hbase.monitoring.LogMonitoring;
 import org.apache.hadoop.hbase.monitoring.StateDumpServlet;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.regionserver.RSDumpServlet;
 import org.apache.hadoop.util.ReflectionUtils;
 
 @InterfaceAudience.Private
@@ -94,7 +95,12 @@ public class MasterDumpServlet extends S
     out.println(LINE);
     long tailKb = getTailKbParam(request);
     LogMonitoring.dumpTailOfLogs(out, tailKb);
-    
+
+    out.println("\n\nRS Queue:");
+    out.println(LINE);
+    if(isShowQueueDump(conf)) {
+      RSDumpServlet.dumpQueue(master, out);
+    }
     out.flush();
   }
   

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Tue Mar 25 19:34:52 2014
@@ -105,7 +105,7 @@ public class MasterFileSystem {
     }
   };
 
-  public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
+  public MasterFileSystem(Server master, MasterServices services)
   throws IOException {
     this.conf = master.getConfiguration();
     this.master = master;
@@ -129,7 +129,7 @@ public class MasterFileSystem {
     HFileSystem.addLocationsOrderInterceptor(conf);
     this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
       master.getConfiguration(), master, services,
-      master.getServerName(), masterRecovery);
+      master.getServerName());
   }
 
   /**

Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java?rev=1581479&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java Tue Mar 25 19:34:52 2014
@@ -0,0 +1,1194 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.zookeeper.KeeperException;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Implements the master RPC services.
+ */
+@InterfaceAudience.Private
+public class MasterRpcServices extends RSRpcServices
+    implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface {
+  protected static final Log LOG = LogFactory.getLog(MasterRpcServices.class.getName());
+
+  private final HMaster master;
+
+  /**
+   * Builds the subset of master configuration handed to a starting regionserver,
+   * e.g. the filesystem to use and the HBase root directory.
+   * @return a startup-response builder pre-populated with those config entries
+   */
+  private RegionServerStartupResponse.Builder createConfigurationSubset() {
+    RegionServerStartupResponse.Builder resp = addConfig(
+      RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
+    // Also pass along the default filesystem so the RS resolves paths the same way.
+    return addConfig(resp, "fs.default.name");
+  }
+
+  /**
+   * Copies one key/value pair from the master's configuration into the
+   * given startup-response builder.
+   * @param resp builder to append to
+   * @param key configuration key to read from the master's Configuration
+   * @return the same builder, for chaining
+   */
+  private RegionServerStartupResponse.Builder addConfig(
+      final RegionServerStartupResponse.Builder resp, final String key) {
+    NameStringPair.Builder entry = NameStringPair.newBuilder()
+      .setName(key)
+      .setValue(master.getConfiguration().get(key));
+    resp.addMapEntries(entry.build());
+    return resp;
+  }
+
+  /**
+   * @param m the master this RPC service fronts
+   * @throws IOException if the underlying regionserver RPC services fail to init
+   */
+  public MasterRpcServices(HMaster m) throws IOException {
+    super(m);
+    master = m;
+  }
+
+  /** How a balancer-switch update is applied: SYNC serializes the ZK write
+   * against the balancer (see switchBalancer), ASYNC writes without that lock. */
+  enum BalanceSwitchMode {
+    SYNC,
+    ASYNC
+  }
+
+  /**
+   * Assigns balancer switch according to BalanceSwitchMode.
+   * Runs coprocessor pre/post hooks around the ZK-backed tracker update; IO
+   * failures from the hooks or the tracker are logged and swallowed, in which
+   * case the switch may be left unchanged.
+   * @param b new balancer switch
+   * @param mode BalanceSwitchMode
+   * @return old balancer switch
+   */
+  boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
+    boolean oldValue = master.loadBalancerTracker.isBalancerOn();
+    boolean newValue = b;
+    try {
+      if (master.cpHost != null) {
+        // Coprocessors may veto or rewrite the requested value.
+        newValue = master.cpHost.preBalanceSwitch(newValue);
+      }
+      try {
+        if (mode == BalanceSwitchMode.SYNC) {
+          // Serialize the switch flip against an in-flight balance() run.
+          synchronized (master.balancer) {
+            master.loadBalancerTracker.setBalancerOn(newValue);
+          }
+        } else {
+          master.loadBalancerTracker.setBalancerOn(newValue);
+        }
+      } catch (KeeperException ke) {
+        throw new IOException(ke);
+      }
+      LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
+      if (master.cpHost != null) {
+        master.cpHost.postBalanceSwitch(oldValue, newValue);
+      }
+    } catch (IOException ioe) {
+      // Best-effort: log and fall through so the caller still gets the old value.
+      LOG.warn("Error flipping balance switch", ioe);
+    }
+    return oldValue;
+  }
+
+  /**
+   * Flips the balancer switch synchronously (SYNC mode).
+   * @param b new balancer switch value
+   * @return the previous balancer switch value
+   */
+  boolean synchronousBalanceSwitch(final boolean b) throws IOException {
+    return switchBalancer(b, BalanceSwitchMode.SYNC);
+  }
+
+  /**
+   * @return list of blocking services and their security info classes that this server
+   * supports: the master-specific services first, then the inherited regionserver ones.
+   */
+  protected List<BlockingServiceAndInterface> getServices() {
+    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(4);
+    bssi.add(new BlockingServiceAndInterface(
+      MasterService.newReflectiveBlockingService(this),
+      MasterService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+      RegionServerStatusService.newReflectiveBlockingService(this),
+      RegionServerStatusService.BlockingInterface.class));
+    // Masters also serve the regionserver RPCs (RSRpcServices superclass).
+    bssi.addAll(super.getServices());
+    return bssi;
+  }
+
+  /**
+   * Returns the last flushed sequence id recorded for the named region,
+   * as tracked by the server manager.
+   */
+  @Override
+  public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
+      GetLastFlushedSequenceIdRequest request) throws ServiceException {
+    try {
+      // Reject calls that arrive before master services are up.
+      master.checkServiceStarted();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    byte[] regionName = request.getRegionName().toByteArray();
+    long seqId = master.serverManager.getLastFlushedSequenceId(regionName);
+    return ResponseConverter.buildGetLastFlushedSequenceIdResponse(seqId);
+  }
+
+  /**
+   * Handles a periodic load report from a regionserver: records the new load
+   * and bumps master request metrics by the delta since the previous report.
+   */
+  @Override
+  public RegionServerReportResponse regionServerReport(
+      RpcController controller, RegionServerReportRequest request) throws ServiceException {
+    try {
+      master.checkServiceStarted();
+      ClusterStatusProtos.ServerLoad sl = request.getLoad();
+      ServerName serverName = ProtobufUtil.toServerName(request.getServer());
+      // Snapshot the previous load before it is overwritten below.
+      ServerLoad oldLoad = master.serverManager.getLoad(serverName);
+      master.serverManager.regionServerReport(serverName, new ServerLoad(sl));
+      if (sl != null && master.metricsMaster != null) {
+        // Up our metrics.
+        master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
+          - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
+      }
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return RegionServerReportResponse.newBuilder().build();
+  }
+
+  /**
+   * Registers a starting regionserver with the server manager and returns the
+   * subset of configuration it needs, including the hostname the master sees it as.
+   */
+  @Override
+  public RegionServerStartupResponse regionServerStartup(
+      RpcController controller, RegionServerStartupRequest request) throws ServiceException {
+    // Register with server manager
+    try {
+      master.checkServiceStarted();
+      InetAddress ia = master.getRemoteInetAddress(
+        request.getPort(), request.getServerStartCode());
+      ServerName rs = master.serverManager.regionServerStartup(ia, request.getPort(),
+        request.getServerStartCode(), request.getServerCurrentTime());
+
+      // Send back some config info
+      RegionServerStartupResponse.Builder resp = createConfigurationSubset();
+      // Tell the RS the hostname the master resolved for it, so both agree on identity.
+      NameStringPair.Builder entry = NameStringPair.newBuilder()
+        .setName(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)
+        .setValue(rs.getHostname());
+      resp.addMapEntries(entry.build());
+
+      return resp.build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
+  /**
+   * Records a fatal error reported by a regionserver: logs it and appends it to
+   * the master's ring buffer of recent RS fatals.
+   */
+  @Override
+  public ReportRSFatalErrorResponse reportRSFatalError(
+      RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
+    String errorText = request.getErrorMessage();
+    ServerName sn = ProtobufUtil.toServerName(request.getServer());
+    String msg = "Region server " + sn
+      + " reported a fatal error:\n" + errorText;
+    LOG.error(msg);
+    master.rsFatals.add(msg);
+    return ReportRSFatalErrorResponse.newBuilder().build();
+  }
+
+  /**
+   * Adds a column family to an existing table. Delegates to the master;
+   * IO failures are wrapped as ServiceException for the RPC layer.
+   */
+  @Override
+  public AddColumnResponse addColumn(RpcController controller,
+      AddColumnRequest req) throws ServiceException {
+    try {
+      master.addColumn(ProtobufUtil.toTableName(req.getTableName()),
+        HColumnDescriptor.convert(req.getColumnFamilies()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return AddColumnResponse.newBuilder().build();
+  }
+
+  /**
+   * Assigns the region named in the request, running coprocessor pre/post hooks.
+   * Expects a REGION_NAME specifier (other types are only warned about);
+   * throws (wrapped) UnknownRegionException when the region is not in RegionStates.
+   */
+  @Override
+  public AssignRegionResponse assignRegion(RpcController controller,
+      AssignRegionRequest req) throws ServiceException {
+    try {
+      final byte [] regionName = req.getRegion().getValue().toByteArray();
+      RegionSpecifierType type = req.getRegion().getType();
+      AssignRegionResponse arr = AssignRegionResponse.newBuilder().build();
+
+      master.checkInitialized();
+      if (type != RegionSpecifierType.REGION_NAME) {
+        LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
+          + " actual: " + type);
+      }
+      RegionStates regionStates = master.assignmentManager.getRegionStates();
+      HRegionInfo regionInfo = regionStates.getRegionInfo(regionName);
+      if (regionInfo == null) throw new UnknownRegionException(Bytes.toString(regionName));
+      if (master.cpHost != null) {
+        // A coprocessor returning true bypasses the assignment entirely.
+        if (master.cpHost.preAssign(regionInfo)) {
+          return arr;
+        }
+      }
+      LOG.info(master.getClientIdAuditPrefix()
+        + " assign " + regionInfo.getRegionNameAsString());
+      master.assignmentManager.assign(regionInfo, true, true);
+      if (master.cpHost != null) {
+        master.cpHost.postAssign(regionInfo);
+      }
+      return arr;
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
+  /**
+   * Runs the load balancer once; the response reports whether it actually ran.
+   */
+  @Override
+  public BalanceResponse balance(RpcController controller,
+      BalanceRequest request) throws ServiceException {
+    try {
+      return BalanceResponse.newBuilder().setBalancerRan(master.balance()).build();
+    } catch (IOException ex) {
+      throw new ServiceException(ex);
+    }
+  }
+
+  /**
+   * Creates a namespace from the supplied descriptor; delegates to the master.
+   */
+  @Override
+  public CreateNamespaceResponse createNamespace(RpcController controller,
+     CreateNamespaceRequest request) throws ServiceException {
+    try {
+      master.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
+      return CreateNamespaceResponse.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Creates a table with the schema and optional split keys in the request;
+   * delegates to the master (table creation itself proceeds asynchronously).
+   */
+  @Override
+  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
+  throws ServiceException {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema());
+    byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
+    try {
+      master.createTable(hTableDescriptor, splitKeys);
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return CreateTableResponse.newBuilder().build();
+  }
+
+  /**
+   * Deletes a column family from a table; delegates to the master.
+   */
+  @Override
+  public DeleteColumnResponse deleteColumn(RpcController controller,
+      DeleteColumnRequest req) throws ServiceException {
+    try {
+      master.deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
+        req.getColumnName().toByteArray());
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return DeleteColumnResponse.newBuilder().build();
+  }
+
+  /**
+   * Deletes the named namespace; delegates to the master.
+   */
+  @Override
+  public DeleteNamespaceResponse deleteNamespace(RpcController controller,
+      DeleteNamespaceRequest request) throws ServiceException {
+    try {
+      master.deleteNamespace(request.getNamespaceName());
+      return DeleteNamespaceResponse.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Execute Delete Snapshot operation.
+   * Checks that the master is initialized and that snapshots are supported
+   * before delegating to the snapshot manager.
+   * @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
+   *    deleted properly.
+   * @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
+   *    exist.
+   */
+  @Override
+  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
+      DeleteSnapshotRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      master.snapshotManager.checkSnapshotSupport();
+
+      LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
+      master.snapshotManager.deleteSnapshot(request.getSnapshot());
+      return DeleteSnapshotResponse.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Deletes the named table; delegates to the master.
+   */
+  @Override
+  public DeleteTableResponse deleteTable(RpcController controller,
+      DeleteTableRequest request) throws ServiceException {
+    try {
+      master.deleteTable(ProtobufUtil.toTableName(request.getTableName()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return DeleteTableResponse.newBuilder().build();
+  }
+
+  /**
+   * Disables the named table; delegates to the master.
+   */
+  @Override
+  public DisableTableResponse disableTable(RpcController controller,
+      DisableTableRequest request) throws ServiceException {
+    try {
+      master.disableTable(ProtobufUtil.toTableName(request.getTableName()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return DisableTableResponse.newBuilder().build();
+  }
+
+  /**
+   * Validates and dispatches a merge of two regions. Both regions must be known,
+   * open, distinct, and (unless forcible) adjacent; expects ENCODED_REGION_NAME
+   * specifiers (other types are only warned about). Failures surface as
+   * ServiceException wrapping UnknownRegionException/MergeRegionException/IOException.
+   */
+  @Override
+  public DispatchMergingRegionsResponse dispatchMergingRegions(RpcController c,
+      DispatchMergingRegionsRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+
+    final byte[] encodedNameOfRegionA = request.getRegionA().getValue()
+      .toByteArray();
+    final byte[] encodedNameOfRegionB = request.getRegionB().getValue()
+      .toByteArray();
+    final boolean forcible = request.getForcible();
+    if (request.getRegionA().getType() != RegionSpecifierType.ENCODED_REGION_NAME
+        || request.getRegionB().getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
+      LOG.warn("mergeRegions specifier type: expected: "
+        + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region_a="
+        + request.getRegionA().getType() + ", region_b="
+        + request.getRegionB().getType());
+    }
+    // Both regions must be known to the assignment manager.
+    RegionStates regionStates = master.assignmentManager.getRegionStates();
+    RegionState regionStateA = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionA));
+    RegionState regionStateB = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionB));
+    if (regionStateA == null || regionStateB == null) {
+      throw new ServiceException(new UnknownRegionException(
+          Bytes.toStringBinary(regionStateA == null ? encodedNameOfRegionA
+              : encodedNameOfRegionB)));
+    }
+
+    // Only online (opened) regions can be merged.
+    if (!regionStateA.isOpened() || !regionStateB.isOpened()) {
+      throw new ServiceException(new MergeRegionException(
+        "Unable to merge regions not online " + regionStateA + ", " + regionStateB));
+    }
+
+    // A region cannot be merged with itself.
+    HRegionInfo regionInfoA = regionStateA.getRegion();
+    HRegionInfo regionInfoB = regionStateB.getRegion();
+    if (regionInfoA.compareTo(regionInfoB) == 0) {
+      throw new ServiceException(new MergeRegionException(
+        "Unable to merge a region to itself " + regionInfoA + ", " + regionInfoB));
+    }
+
+    // Non-adjacent merges are only allowed when the caller forces them.
+    if (!forcible && !HRegionInfo.areAdjacent(regionInfoA, regionInfoB)) {
+      throw new ServiceException(new MergeRegionException(
+        "Unable to merge not adjacent regions "
+          + regionInfoA.getRegionNameAsString() + ", "
+          + regionInfoB.getRegionNameAsString()
+          + " where forcible = " + forcible));
+    }
+
+    try {
+      master.dispatchMergingRegions(regionInfoA, regionInfoB, forcible);
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+
+    return DispatchMergingRegionsResponse.newBuilder().build();
+  }
+
+  /**
+   * Enables or disables the catalog janitor chore; the response carries the
+   * previous enabled state.
+   */
+  @Override
+  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
+      EnableCatalogJanitorRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return EnableCatalogJanitorResponse.newBuilder().setPrevValue(
+      master.catalogJanitorChore.setEnabled(req.getEnable())).build();
+  }
+
+  /**
+   * Enables the named table; delegates to the master.
+   */
+  @Override
+  public EnableTableResponse enableTable(RpcController controller,
+      EnableTableRequest request) throws ServiceException {
+    try {
+      master.enableTable(ProtobufUtil.toTableName(request.getTableName()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return EnableTableResponse.newBuilder().build();
+  }
+
+  /**
+   * Dispatches a coprocessor-endpoint call to a service registered on the master.
+   * Looks up the service and method named in the request, invokes it with the
+   * deserialized request message, and wraps the result (with an empty region
+   * specifier, since this is a master-level call) in a CoprocessorServiceResponse.
+   */
+  @Override
+  public ClientProtos.CoprocessorServiceResponse execMasterService(final RpcController controller,
+      final ClientProtos.CoprocessorServiceRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      ServerRpcController execController = new ServerRpcController();
+
+      ClientProtos.CoprocessorServiceCall call = request.getCall();
+      String serviceName = call.getServiceName();
+      String methodName = call.getMethodName();
+      if (!master.coprocessorServiceHandlers.containsKey(serviceName)) {
+        throw new UnknownProtocolException(null,
+          "No registered master coprocessor service found for name "+serviceName);
+      }
+
+      Service service = master.coprocessorServiceHandlers.get(serviceName);
+      Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();
+      Descriptors.MethodDescriptor methodDesc = serviceDesc.findMethodByName(methodName);
+      if (methodDesc == null) {
+        throw new UnknownProtocolException(service.getClass(),
+          "Unknown method "+methodName+" called on master service "+serviceName);
+      }
+
+      //invoke the method
+      Message execRequest = service.getRequestPrototype(methodDesc).newBuilderForType()
+          .mergeFrom(call.getRequest()).build();
+      final Message.Builder responseBuilder =
+          service.getResponsePrototype(methodDesc).newBuilderForType();
+      // The callback fires synchronously for blocking services; it copies the
+      // service's reply into responseBuilder.
+      service.callMethod(methodDesc, execController, execRequest, new RpcCallback<Message>() {
+        @Override
+        public void run(Message message) {
+          if (message != null) {
+            responseBuilder.mergeFrom(message);
+          }
+        }
+      });
+      Message execResult = responseBuilder.build();
+
+      // Propagate any IOException the service recorded on the controller.
+      if (execController.getFailedOn() != null) {
+        throw execController.getFailedOn();
+      }
+      ClientProtos.CoprocessorServiceResponse.Builder builder =
+        ClientProtos.CoprocessorServiceResponse.newBuilder();
+      builder.setRegion(RequestConverter.buildRegionSpecifier(
+        RegionSpecifierType.REGION_NAME, HConstants.EMPTY_BYTE_ARRAY));
+      builder.setValue(
+        builder.getValueBuilder().setName(execResult.getClass().getName())
+          .setValue(execResult.toByteString()));
+      return builder.build();
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
+  }
+
+  /**
+   * Triggers an asynchronous attempt to run a distributed procedure.
+   * {@inheritDoc}
+   *
+   * @return response carrying the maximum time the client should wait for the
+   *   procedure to complete before giving up
+   * @throws ServiceException if the procedure manager is not registered for the
+   *   requested signature, or wrapping any IOException from execution
+   */
+  @Override
+  public ExecProcedureResponse execProcedure(RpcController controller,
+      ExecProcedureRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      ProcedureDescription desc = request.getProcedure();
+      MasterProcedureManager mpm = master.mpmHost.getProcedureManager(
+        desc.getSignature());
+      if (mpm == null) {
+        throw new ServiceException("The procedure is not registered: "
+          + desc.getSignature());
+      }
+  
+      LOG.info(master.getClientIdAuditPrefix() + " procedure request for: "
+        + desc.getSignature());
+
+      // Fire-and-forget: the procedure runs asynchronously after this call.
+      mpm.execProcedure(desc);
+
+      // send back the max amount of time the client should wait for the procedure
+      // to complete
+      long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
+      return ExecProcedureResponse.newBuilder().setExpectedTimeout(
+        waitTime).build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Returns the current cluster status (as a protobuf message) once the
+   * master is initialized.
+   */
+  @Override
+  public GetClusterStatusResponse getClusterStatus(RpcController controller,
+      GetClusterStatusRequest req) throws ServiceException {
+    GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
+    try {
+      master.checkInitialized();
+      response.setClusterStatus(master.getClusterStatus().convert());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return response.build();
+  }
+
+  /**
+   * List the currently available/stored snapshots. Any in-progress snapshots are ignored
+   *
+   * @throws ServiceException wrapping any IOException from the snapshot manager
+   */
+  @Override
+  public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
+      GetCompletedSnapshotsRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
+      List<SnapshotDescription> snapshots = master.snapshotManager.getCompletedSnapshots();
+
+      // convert to protobuf
+      for (SnapshotDescription snapshot : snapshots) {
+        builder.addSnapshots(snapshot);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Looks up a single namespace descriptor by name.
+   * NOTE(review): unlike most handlers here this does not call
+   * master.checkInitialized() first — confirm whether that is intentional.
+   */
+  @Override
+  public GetNamespaceDescriptorResponse getNamespaceDescriptor(
+      RpcController controller, GetNamespaceDescriptorRequest request)
+      throws ServiceException {
+    try {
+      return GetNamespaceDescriptorResponse.newBuilder()
+        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(
+          master.getNamespaceDescriptor(request.getNamespaceName())))
+        .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Get the number of regions of the table that have been updated by the alter.
+   *
+   * @return Pair indicating the number of regions updated Pair.getFirst is the
+   *         regions that are yet to be updated Pair.getSecond is the total number
+   *         of regions of the table
+   * @throws ServiceException wrapping any IOException from the assignment manager
+   */
+  @Override
+  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
+      RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
+    // TODO: currently, we query using the table name on the client side. this
+    // may overlap with other table operations or the table operation may
+    // have completed before querying this API. We need to refactor to a
+    // transaction system in the future to avoid these ambiguities.
+    TableName tableName = ProtobufUtil.toTableName(req.getTableName());
+
+    try {
+      master.checkInitialized();
+      Pair<Integer,Integer> pair = master.assignmentManager.getReopenStatus(tableName);
+      GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
+      ret.setYetToUpdateRegions(pair.getFirst());
+      ret.setTotalRegions(pair.getSecond());
+      return ret.build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
+  /**
+   * Get list of TableDescriptors for requested tables.
+   * @param c Unused (set to null).
+   * @param req GetTableDescriptorsRequest that contains:
+   * - tableNames: requested tables, or if empty, all are requested
+   * @return GetTableDescriptorsResponse
+   * @throws ServiceException
+   */
+  @Override
+  public GetTableDescriptorsResponse getTableDescriptors(RpcController c,
+      GetTableDescriptorsRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+
+    List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
+    List<TableName> tableNameList = new ArrayList<TableName>();
+    for(HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
+      tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
+    }
+    // Coprocessor pre-hook may populate 'descriptors' itself and bypass
+    // the default lookup below.
+    boolean bypass = false;
+    if (master.cpHost != null) {
+      try {
+        bypass = master.cpHost.preGetTableDescriptors(tableNameList, descriptors);
+      } catch (IOException ioe) {
+        throw new ServiceException(ioe);
+      }
+    }
+
+    if (!bypass) {
+      if (req.getTableNamesCount() == 0) {
+        // request for all TableDescriptors
+        Map<String, HTableDescriptor> descriptorMap = null;
+        try {
+          descriptorMap = master.getTableDescriptors().getAll();
+        } catch (IOException e) {
+          LOG.warn("Failed getting all descriptors", e);
+        }
+        if (descriptorMap != null) {
+          // System tables (hbase:meta etc.) are excluded from the listing.
+          for(HTableDescriptor desc: descriptorMap.values()) {
+            if(!desc.getTableName().isSystemTable()) {
+              descriptors.add(desc);
+            }
+          }
+        }
+      } else {
+        // Per-table lookup failures are logged and skipped, not fatal.
+        for (TableName s: tableNameList) {
+          try {
+            HTableDescriptor desc = master.getTableDescriptors().get(s);
+            if (desc != null) {
+              descriptors.add(desc);
+            }
+          } catch (IOException e) {
+            LOG.warn("Failed getting descriptor for " + s, e);
+          }
+        }
+      }
+
+      if (master.cpHost != null) {
+        try {
+          master.cpHost.postGetTableDescriptors(descriptors);
+        } catch (IOException ioe) {
+          throw new ServiceException(ioe);
+        }
+      }
+    }
+
+    GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
+    for (HTableDescriptor htd: descriptors) {
+      builder.addTableSchema(htd.convert());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Get list of userspace table names
+   * @param controller Unused (set to null).
+   * @param req GetTableNamesRequest
+   * @return GetTableNamesResponse
+   * @throws ServiceException
+   */
+  @Override
+  public GetTableNamesResponse getTableNames(RpcController controller,
+      GetTableNamesRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+      Collection<HTableDescriptor> descriptors = master.getTableDescriptors().getAll().values();
+      GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
+      for (HTableDescriptor descriptor: descriptors) {
+        // Userspace tables only; system tables are filtered out.
+        if (descriptor.getTableName().isSystemTable()) {
+          continue;
+        }
+        builder.addTableNames(ProtobufUtil.toProtoTableName(descriptor.getTableName()));
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Reports whether the catalog janitor chore is enabled; false if the chore
+   * has not been created yet.
+   */
+  @Override
+  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
+      IsCatalogJanitorEnabledRequest req) throws ServiceException {
+    boolean isEnabled = master.catalogJanitorChore != null ?
+      master.catalogJanitorChore.getEnabled() : false;
+    return IsCatalogJanitorEnabledResponse.newBuilder().setValue(isEnabled).build();
+  }
+
+  /**
+   * Reports whether this master is initialized and not stopped.
+   */
+  @Override
+  public IsMasterRunningResponse isMasterRunning(RpcController c,
+      IsMasterRunningRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+      return IsMasterRunningResponse.newBuilder().setIsMasterRunning(
+        !master.isStopped()).build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Checks if the specified procedure is done.
+   * @return true if the procedure is done,
+   *   false if the procedure is in the process of completing
+   * @throws ServiceException if invalid procedure, or
+   *  a failed procedure with progress failure reason.
+   */
+  @Override
+  public IsProcedureDoneResponse isProcedureDone(RpcController controller,
+      IsProcedureDoneRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      ProcedureDescription desc = request.getProcedure();
+      MasterProcedureManager mpm = master.mpmHost.getProcedureManager(
+        desc.getSignature());
+      if (mpm == null) {
+        throw new ServiceException("The procedure is not registered: "
+          + desc.getSignature());
+      }
+      LOG.debug("Checking to see if procedure from request:"
+        + desc.getSignature() + " is done");
+
+      IsProcedureDoneResponse.Builder builder =
+        IsProcedureDoneResponse.newBuilder();
+      boolean done = mpm.isProcedureDone(desc);
+      builder.setDone(done);
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Returns the status of the requested snapshot restore/clone operation.
+   * This method is not exposed to the user, it is just used internally by HBaseAdmin
+   * to verify if the restore is completed.
+   *
+   * No exceptions are thrown if the restore is not running, the result will be "done".
+   *
+   * @return done <tt>true</tt> if the restore/clone operation is completed.
+   * @throws ServiceException if the operation failed.
+   */
+  @Override
+  public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(RpcController controller,
+      IsRestoreSnapshotDoneRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      SnapshotDescription snapshot = request.getSnapshot();
+      IsRestoreSnapshotDoneResponse.Builder builder = IsRestoreSnapshotDoneResponse.newBuilder();
+      boolean done = master.snapshotManager.isRestoreDone(snapshot);
+      builder.setDone(done);
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Checks if the specified snapshot is done.
+   * @return true if the snapshot is in file system ready to use,
+   *   false if the snapshot is in the process of completing
+   * @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or
+   *  a wrapped HBaseSnapshotException with progress failure reason.
+   */
+  @Override
+  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
+      IsSnapshotDoneRequest request) throws ServiceException {
+    LOG.debug("Checking to see if snapshot from request:" +
+      ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
+    try {
+      master.checkInitialized();
+      IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
+      boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot());
+      builder.setDone(done);
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Lists all namespace descriptors known to the master, converted to protobuf.
+   */
+  @Override
+  public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
+      ListNamespaceDescriptorsRequest request) throws ServiceException {
+    try {
+      ListNamespaceDescriptorsResponse.Builder response =
+        ListNamespaceDescriptorsResponse.newBuilder();
+      for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) {
+        response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
+      }
+      return response.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Lists the table descriptors belonging to the requested namespace.
+   */
+  @Override
+  public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
+      ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
+    try {
+      ListTableDescriptorsByNamespaceResponse.Builder b =
+        ListTableDescriptorsByNamespaceResponse.newBuilder();
+      for(HTableDescriptor htd: master.listTableDescriptorsByNamespace(request.getNamespaceName())) {
+        b.addTableSchema(htd.convert());
+      }
+      return b.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Lists the table names belonging to the requested namespace.
+   */
+  @Override
+  public ListTableNamesByNamespaceResponse listTableNamesByNamespace(RpcController c,
+      ListTableNamesByNamespaceRequest request) throws ServiceException {
+    try {
+      ListTableNamesByNamespaceResponse.Builder b =
+        ListTableNamesByNamespaceResponse.newBuilder();
+      for (TableName tableName: master.listTableNamesByNamespace(request.getNamespaceName())) {
+        b.addTableName(ProtobufUtil.toProtoTableName(tableName));
+      }
+      return b.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Modifies an existing column family on the given table; delegates to the master.
+   */
+  @Override
+  public ModifyColumnResponse modifyColumn(RpcController controller,
+      ModifyColumnRequest req) throws ServiceException {
+    try {
+      master.modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
+        HColumnDescriptor.convert(req.getColumnFamilies()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return ModifyColumnResponse.newBuilder().build();
+  }
+
+  /**
+   * Modifies an existing namespace; delegates to the master.
+   */
+  @Override
+  public ModifyNamespaceResponse modifyNamespace(RpcController controller,
+      ModifyNamespaceRequest request) throws ServiceException {
+    try {
+      master.modifyNamespace(
+        ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
+      return ModifyNamespaceResponse.getDefaultInstance();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Modifies an existing table's schema; delegates to the master.
+   */
+  @Override
+  public ModifyTableResponse modifyTable(RpcController controller,
+      ModifyTableRequest req) throws ServiceException {
+    try {
+      master.modifyTable(ProtobufUtil.toTableName(req.getTableName()),
+        HTableDescriptor.convert(req.getTableSchema()));
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return ModifyTableResponse.newBuilder().build();
+  }
+
+  /**
+   * Moves a region (identified by its encoded name) to the given destination
+   * server, or to a server chosen by the master if no destination is supplied.
+   */
+  @Override
+  public MoveRegionResponse moveRegion(RpcController controller,
+      MoveRegionRequest req) throws ServiceException {
+    final byte [] encodedRegionName = req.getRegion().getValue().toByteArray();
+    RegionSpecifierType type = req.getRegion().getType();
+    // Null destination means "let the master pick a server".
+    final byte [] destServerName = (req.hasDestServerName())?
+      Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName()):null;
+    MoveRegionResponse mrr = MoveRegionResponse.newBuilder().build();
+
+    // Wrong specifier type is tolerated with a warning; the move is attempted anyway.
+    if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
+      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
+        + " actual: " + type);
+    }
+
+    try {
+      master.checkInitialized();
+      master.move(encodedRegionName, destServerName);
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return mrr;
+  }
+
+  /**
+   * Offline specified region from master's in-memory state. It will not attempt to
+   * reassign the region as in unassign.
+   *
+   * This is a special method that should be used by experts or hbck.
+   *
+   */
+  @Override
+  public OfflineRegionResponse offlineRegion(RpcController controller,
+      OfflineRegionRequest request) throws ServiceException {
+    final byte [] regionName = request.getRegion().getValue().toByteArray();
+    RegionSpecifierType type = request.getRegion().getType();
+    if (type != RegionSpecifierType.REGION_NAME) {
+      LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
+        + " actual: " + type);
+    }
+
+    try {
+      master.checkInitialized();
+      Pair<HRegionInfo, ServerName> pair =
+        MetaReader.getRegion(master.getCatalogTracker(), regionName);
+      if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
+      HRegionInfo hri = pair.getFirst();
+      if (master.cpHost != null) {
+        master.cpHost.preRegionOffline(hri);
+      }
+      LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
+      master.assignmentManager.regionOffline(hri);
+      if (master.cpHost != null) {
+        master.cpHost.postRegionOffline(hri);
+      }
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+    return OfflineRegionResponse.newBuilder().build();
+  }
+
+  /**
+   * Execute Restore/Clone snapshot operation.
+   *
+   * <p>If the specified table exists a "Restore" is executed, replacing the table
+   * schema and directory data with the content of the snapshot.
+   * The table must be disabled, or a UnsupportedOperationException will be thrown.
+   *
+   * <p>If the table doesn't exist a "Clone" is executed, a new table is created
+   * using the schema at the time of the snapshot, and the content of the snapshot.
+   *
+   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
+   * are immutable the table can point to and use the same files as the original one.
+   */
+  @Override
+  public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
+      RestoreSnapshotRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      master.snapshotManager.checkSnapshotSupport();
+
+      // ensure namespace exists
+      TableName dstTable = TableName.valueOf(request.getSnapshot().getTable());
+      master.getNamespaceDescriptor(dstTable.getNamespaceAsString());
+
+      SnapshotDescription reqSnapshot = request.getSnapshot();
+      master.snapshotManager.restoreSnapshot(reqSnapshot);
+      return RestoreSnapshotResponse.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Runs one catalog janitor scan and returns the number of entries cleaned.
+   */
+  @Override
+  public RunCatalogScanResponse runCatalogScan(RpcController c,
+      RunCatalogScanRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+      return ResponseConverter.buildRunCatalogScanResponse(master.catalogJanitorChore.scan());
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
+  /**
+   * Turns the load balancer on or off, optionally waiting for in-flight
+   * region moves to finish (synchronous mode).
+   *
+   * @return response carrying the previous balancer state
+   */
+  @Override
+  public SetBalancerRunningResponse setBalancerRunning(RpcController c,
+      SetBalancerRunningRequest req) throws ServiceException {
+    try {
+      master.checkInitialized();
+      boolean prevValue = (req.getSynchronous())?
+        synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn());
+      return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
+  /**
+   * Shuts down the whole cluster (master and all region servers).
+   */
+  @Override
+  public ShutdownResponse shutdown(RpcController controller,
+      ShutdownRequest request) throws ServiceException {
+    LOG.info(master.getClientIdAuditPrefix() + " shutdown");
+    master.shutdown();
+    return ShutdownResponse.newBuilder().build();
+  }
+
+  /**
+   * Triggers an asynchronous attempt to take a snapshot.
+   * {@inheritDoc}
+   *
+   * @return response carrying the maximum time the client should wait for the
+   *   snapshot to complete
+   */
+  @Override
+  public SnapshotResponse snapshot(RpcController controller,
+      SnapshotRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      master.snapshotManager.checkSnapshotSupport();
+
+      LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" +
+        ClientSnapshotDescriptionUtils.toString(request.getSnapshot()));
+      // get the snapshot information
+      SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(
+        request.getSnapshot(), master.getConfiguration());
+      master.snapshotManager.takeSnapshot(snapshot);
+
+      // send back the max amount of time the client should wait for the snapshot to complete
+      long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(),
+        snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
+      return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Stops this master only; the rest of the cluster keeps running.
+   */
+  @Override
+  public StopMasterResponse stopMaster(RpcController controller,
+      StopMasterRequest request) throws ServiceException {
+    LOG.info(master.getClientIdAuditPrefix() + " stop");
+    master.stopMaster();
+    return StopMasterResponse.newBuilder().build();
+  }
+
+  /**
+   * Unassigns the specified region from its current server so the master will
+   * reassign it (force closes it first when force=true). If the region ends up
+   * offline on no server it is explicitly reassigned.
+   *
+   * @throws ServiceException wrapping UnknownRegionException when the region is
+   *  not found in meta, or any IOException from the lookup/coprocessor hooks
+   */
+  @Override
+  public UnassignRegionResponse unassignRegion(RpcController controller,
+      UnassignRegionRequest req) throws ServiceException {
+    try {
+      final byte [] regionName = req.getRegion().getValue().toByteArray();
+      RegionSpecifierType type = req.getRegion().getType();
+      final boolean force = req.getForce();
+      UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();
+
+      master.checkInitialized();
+      // Wrong specifier type is tolerated with a warning; the unassign is attempted anyway.
+      if (type != RegionSpecifierType.REGION_NAME) {
+        LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
+          + " actual: " + type);
+      }
+      Pair<HRegionInfo, ServerName> pair =
+        MetaReader.getRegion(master.getCatalogTracker(), regionName);
+      // Use toStringBinary: region names can contain non-printable bytes
+      // (consistent with offlineRegion above).
+      if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
+      HRegionInfo hri = pair.getFirst();
+      // Coprocessor pre-hook may veto the unassign entirely.
+      if (master.cpHost != null) {
+        if (master.cpHost.preUnassign(hri, force)) {
+          return urr;
+        }
+      }
+      LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
+          + " in current location if it is online and reassign.force=" + force);
+      master.assignmentManager.unassign(hri, force);
+      if (master.assignmentManager.getRegionStates().isRegionOffline(hri)) {
+        LOG.debug("Region " + hri.getRegionNameAsString()
+            + " is not online on any region server, reassigning it.");
+        master.assignRegion(hri);
+      }
+      if (master.cpHost != null) {
+        master.cpHost.postUnassign(hri, force);
+      }
+
+      return urr;
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+}

Propchange: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Tue Mar 25 19:34:52 2014
@@ -22,13 +22,13 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -68,7 +68,7 @@ public interface MasterServices extends 
   /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
-  MasterCoprocessorHost getCoprocessorHost();
+  MasterCoprocessorHost getMasterCoprocessorHost();
 
   /**
    * Check table is modifiable; i.e. exists and is offline.

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java Tue Mar 25 19:34:52 2014
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -34,9 +33,10 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
+import org.apache.hadoop.hbase.util.FSUtils;
+
 import com.google.protobuf.ServiceException;
 
 /**
@@ -55,6 +55,14 @@ public class MasterStatusServlet extends
     HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
     assert master != null : "No Master in context!";
 
+    response.setContentType("text/html");
+
+    if (!master.isOnline()) {
+      response.getWriter().write("The Master is initializing!");
+      response.getWriter().close();
+      return;
+    }
+
     Configuration conf = master.getConfiguration();
     HBaseAdmin admin = new HBaseAdmin(conf);
 
@@ -73,8 +81,7 @@ public class MasterStatusServlet extends
       servers = master.getServerManager().getOnlineServersList();
       deadServers = master.getServerManager().getDeadServers().copyServerNames();
     }
-    
-    response.setContentType("text/html");
+
     MasterStatusTmpl tmpl;
     try {
        tmpl = new MasterStatusTmpl()
@@ -82,8 +89,8 @@ public class MasterStatusServlet extends
       .setMetaLocation(metaLocation)
       .setServers(servers)
       .setDeadServers(deadServers)
-      .setCatalogJanitorEnabled(master.isCatalogJanitorEnabled(null,
-          RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue());
+      .setCatalogJanitorEnabled(master.getMasterRpcServices().isCatalogJanitorEnabled(
+          null, RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue());
     } catch (ServiceException s) {
       admin.close();
       throw new IOException(s);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java Tue Mar 25 19:34:52 2014
@@ -46,7 +46,7 @@ public class MetricsMasterWrapperImpl im
 
   @Override
   public String getZookeeperQuorum() {
-    ZooKeeperWatcher zk = master.getZooKeeperWatcher();
+    ZooKeeperWatcher zk = master.getZooKeeper();
     if (zk == null) {
       return "";
     }
@@ -55,7 +55,7 @@ public class MetricsMasterWrapperImpl im
 
   @Override
   public String[] getCoprocessors() {
-    return master.getCoprocessors();
+    return master.getMasterCoprocessors();
   }
 
   @Override

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java Tue Mar 25 19:34:52 2014
@@ -671,6 +671,16 @@ public class RegionStates {
         numServers++;
       }
     }
+    if (numServers > 1) {
+      // The master region server holds only a couple regions.
+      // Don't consider this server in calculating the average load
+      // if there are other region servers to avoid possible confusion.
+      Set<HRegionInfo> hris = serverHoldings.get(server.getServerName());
+      if (hris != null) {
+        totalLoad -= hris.size();
+        numServers--;
+      }
+    }
     return numServers == 0 ? 0.0 :
       (double)totalLoad / (double)numServers;
   }

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Tue Mar 25 19:34:52 2014
@@ -58,9 +58,13 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Triple;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ServiceException;
@@ -182,7 +186,6 @@ public class ServerManager {
     this(master, services, true);
   }
 
-  @SuppressWarnings("deprecation")
   ServerManager(final Server master, final MasterServices services,
       final boolean connect) throws IOException {
     this.master = master;
@@ -441,12 +444,21 @@ public class ServerManager {
 
   void letRegionServersShutdown() {
     long previousLogTime = 0;
+    ServerName sn = master.getServerName();
+    ZooKeeperWatcher zkw = master.getZooKeeper();
     while (!onlineServers.isEmpty()) {
 
       if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+        Set<ServerName> remainingServers = onlineServers.keySet();
+        synchronized (onlineServers) {
+          if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
+            // Master will delete itself later.
+            return;
+          }
+        }
         StringBuilder sb = new StringBuilder();
         // It's ok here to not sync on onlineServers - merely logging
-        for (ServerName key : this.onlineServers.keySet()) {
+        for (ServerName key : remainingServers) {
           if (sb.length() > 0) {
             sb.append(", ");
           }
@@ -456,6 +468,19 @@ public class ServerManager {
         previousLogTime = System.currentTimeMillis();
       }
 
+      try {
+        List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
+        if (servers == null || (servers.size() == 1
+            && servers.contains(sn.toString()))) {
+          LOG.info("ZK shows there is only the master self online, exiting now");
+          // Master could have lost some ZK events, no need to wait any longer.
+          break;
+        }
+      } catch (KeeperException ke) {
+        LOG.warn("Failed to list regionservers", ke);
+        // ZK is malfunctioning, don't hang here
+        break;
+      }
       synchronized (onlineServers) {
         try {
           onlineServers.wait(100);
@@ -471,6 +496,12 @@ public class ServerManager {
    * shutdown processing.
    */
   public synchronized void expireServer(final ServerName serverName) {
+    if (serverName.equals(master.getServerName())) {
+      if (!(master.isAborted() || master.isStopped())) {
+        master.stop("We lost our znode?");
+      }
+      return;
+    }
     if (!services.isServerShutdownHandlerEnabled()) {
       LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
           + "delay expiring server " + serverName);
@@ -758,12 +789,18 @@ public class ServerManager {
     * @throws IOException
     * @throws RetriesExhaustedException wrapping a ConnectException if failed
     */
+  @SuppressWarnings("deprecation")
   private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
   throws IOException {
     AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
     if (admin == null) {
       LOG.debug("New admin connection to " + sn.toString());
-      admin = this.connection.getAdmin(sn);
+      if (sn.equals(master.getServerName()) && master instanceof HRegionServer) {
+        // A master is also a region server now, see HBASE-10569 for details
+        admin = ((HRegionServer)master).getRSRpcServices();
+      } else {
+        admin = this.connection.getAdmin(sn);
+      }
       this.rsAdmins.put(sn, admin);
     }
     return admin;
@@ -813,12 +850,10 @@ public class ServerManager {
     long lastCountChange = startTime;
     int count = countOfRegionServers();
     int oldCount = 0;
-    while (
-      !this.master.isStopped() &&
-        count < maxToStart &&
-        (lastCountChange+interval > now || timeout > slept || count < minToStart)
-      ){
-
+    ServerName masterSn = master.getServerName();
+    boolean selfCheckedIn = isServerOnline(masterSn);
+    while (!this.master.isStopped() && !selfCheckedIn && count < maxToStart
+        && (lastCountChange+interval > now || timeout > slept || count < minToStart)) {
       // Log some info at every interval time or if there is a change
       if (oldCount != count || lastLogTime+interval < now){
         lastLogTime = now;
@@ -837,6 +872,8 @@ public class ServerManager {
       now =  System.currentTimeMillis();
       slept = now - startTime;
 
+      selfCheckedIn = isServerOnline(masterSn);
+
       oldCount = count;
       count = countOfRegionServers();
       if (count != oldCount) {
@@ -942,7 +979,6 @@ public class ServerManager {
 
     // Remove the deadNotExpired servers from the server list.
     removeDeadNotExpiredServers(destServers);
-
     return destServers;
   }
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Tue Mar 25 19:34:52 2014
@@ -46,7 +46,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
@@ -151,25 +150,7 @@ public class SplitLogManager extends Zoo
 
   /**
    * Wrapper around {@link #SplitLogManager(ZooKeeperWatcher zkw, Configuration conf,
-   *   Stoppable stopper, MasterServices master, ServerName serverName,
-   *   boolean masterRecovery, TaskFinisher tf)}
-   * with masterRecovery = false, and tf = null.  Used in unit tests.
-   *
-   * @param zkw the ZK watcher
-   * @param conf the HBase configuration
-   * @param stopper the stoppable in case anything is wrong
-   * @param master the master services
-   * @param serverName the master server name
-   */
-  public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf,
-      Stoppable stopper, MasterServices master, ServerName serverName) {
-    this(zkw, conf, stopper, master, serverName, false, null);
-  }
-
-  /**
-   * Wrapper around {@link #SplitLogManager(ZooKeeperWatcher zkw, Configuration conf,
-   *   Stoppable stopper, MasterServices master, ServerName serverName,
-   *   boolean masterRecovery, TaskFinisher tf)}
+   *   Stoppable stopper, MasterServices master, ServerName serverName, TaskFinisher tf)}
    * that provides a task finisher for copying recovered edits to their final destination.
    * The task finisher has to be robust because it can be arbitrarily restarted or called
    * multiple times.
@@ -179,11 +160,10 @@ public class SplitLogManager extends Zoo
    * @param stopper the stoppable in case anything is wrong
    * @param master the master services
    * @param serverName the master server name
-   * @param masterRecovery an indication if the master is in recovery
    */
   public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf,
-      Stoppable stopper, MasterServices master, ServerName serverName, boolean masterRecovery) {
-    this(zkw, conf, stopper, master, serverName, masterRecovery, new TaskFinisher() {
+      Stoppable stopper, MasterServices master, ServerName serverName) {
+    this(zkw, conf, stopper, master, serverName, new TaskFinisher() {
       @Override
       public Status finish(ServerName workerName, String logfile) {
         try {
@@ -207,12 +187,11 @@ public class SplitLogManager extends Zoo
    * @param stopper the stoppable in case anything is wrong
    * @param master the master services
    * @param serverName the master server name
-   * @param masterRecovery an indication if the master is in recovery
    * @param tf task finisher
    */
   public SplitLogManager(ZooKeeperWatcher zkw, Configuration conf,
         Stoppable stopper, MasterServices master,
-        ServerName serverName, boolean masterRecovery, TaskFinisher tf) {
+        ServerName serverName, TaskFinisher tf) {
     super(zkw);
     this.taskFinisher = tf;
     this.conf = conf;
@@ -233,10 +212,8 @@ public class SplitLogManager extends Zoo
 
     this.failedDeletions = Collections.synchronizedSet(new HashSet<String>());
 
-    if (!masterRecovery) {
-      Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), serverName
-          + ".splitLogManagerTimeoutMonitor");
-    }
+    Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), serverName
+      + ".splitLogManagerTimeoutMonitor");
     // Watcher can be null during tests with Mock'd servers.
     if (this.watcher != null) {
       this.watcher.registerListener(this);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java Tue Mar 25 19:34:52 2014
@@ -22,6 +22,8 @@ import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Deque;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -38,9 +40,12 @@ import org.apache.hadoop.hbase.HBaseIOEx
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
@@ -56,12 +61,32 @@ public abstract class BaseLoadBalancer i
   private static final int MIN_SERVER_BALANCE = 2;
   private volatile boolean stopped = false;
 
+  protected static final Set<String> TABLES_ON_MASTER = new HashSet<String>();
+
+  /**
+   * Regions of these tables will be put on the master regionserver by default.
+   */
+  static {
+    TABLES_ON_MASTER.add(AccessControlLists.ACL_TABLE_NAME.getNameAsString());
+    TABLES_ON_MASTER.add(TableName.NAMESPACE_TABLE_NAME.getNameAsString());
+    TABLES_ON_MASTER.add(TableName.META_TABLE_NAME.getNameAsString());
+  }
+
+  /**
+   * Check if a region belongs to some small system table.
+   * If so, it may be expected to be put on the master regionserver.
+   */
+  protected static boolean shouldBeOnMaster(HRegionInfo region) {
+    return TABLES_ON_MASTER.contains(region.getTable().getNameAsString());
+  }
+
   /**
    * An efficient array based implementation similar to ClusterState for keeping
    * the status of the cluster in terms of region assignment and distribution.
    * To be used by LoadBalancers.
    */
   protected static class Cluster {
+    ServerName masterServerName;
     ServerName[] servers;
     ArrayList<String> tables;
     HRegionInfo[] regions;
@@ -74,6 +99,7 @@ public abstract class BaseLoadBalancer i
     int[]   regionIndexToTableIndex;     //regionIndex -> tableIndex
     int[][] numRegionsPerServerPerTable; //serverIndex -> tableIndex -> # regions
     int[]   numMaxRegionsPerTable;       //tableIndex -> max number of regions in a single RS
+    int     numUserRegionsOnMaster;      //number of user regions on the active master
 
     Integer[] serverIndicesSortedByRegionCount;
 
@@ -87,9 +113,13 @@ public abstract class BaseLoadBalancer i
     int numMovedRegions = 0; //num moved regions from the initial configuration
     int numMovedMetaRegions = 0;       //num of moved regions that are META
 
-    protected Cluster(Map<ServerName, List<HRegionInfo>> clusterState,  Map<String, Deque<RegionLoad>> loads,
+    @SuppressWarnings("unchecked")
+    protected Cluster(ServerName masterServerName,
+        Map<ServerName, List<HRegionInfo>> clusterState,
+        Map<String, Deque<RegionLoad>> loads,
         RegionLocationFinder regionFinder) {
 
+      this.masterServerName = masterServerName;
       serversToIndex = new HashMap<String, Integer>();
       tablesToIndex = new HashMap<String, Integer>();
       //regionsToIndex = new HashMap<HRegionInfo, Integer>();
@@ -147,6 +177,14 @@ public abstract class BaseLoadBalancer i
           regionsPerServer[serverIndex] = new int[entry.getValue().size()];
         }
         serverIndicesSortedByRegionCount[serverIndex] = serverIndex;
+
+        if (servers[serverIndex].equals(masterServerName)) {
+          for (HRegionInfo hri: entry.getValue()) {
+            if (!shouldBeOnMaster(hri)) {
+              numUserRegionsOnMaster++;
+            }
+          }
+        }
       }
 
       for (Entry<ServerName, List<HRegionInfo>> entry : clusterState.entrySet()) {
@@ -218,6 +256,21 @@ public abstract class BaseLoadBalancer i
     }
 
     public void moveOrSwapRegion(int lServer, int rServer, int lRegion, int rRegion) {
+      if (servers[lServer].equals(masterServerName)) {
+        if (lRegion >= 0 && !shouldBeOnMaster(regions[lRegion])) {
+          numUserRegionsOnMaster--;
+        }
+        if (rRegion >= 0 && !shouldBeOnMaster(regions[rRegion])) {
+          numUserRegionsOnMaster++;
+        }
+      } else if (servers[rServer].equals(masterServerName)) {
+        if (lRegion >= 0 && !shouldBeOnMaster(regions[lRegion])) {
+          numUserRegionsOnMaster++;
+        }
+        if (rRegion >= 0 && !shouldBeOnMaster(regions[rRegion])) {
+          numUserRegionsOnMaster--;
+        }
+      }
       //swap
       if (rRegion >= 0 && lRegion >= 0) {
         regionMoved(rRegion, rServer, lServer);
@@ -354,6 +407,7 @@ public abstract class BaseLoadBalancer i
   private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);
 
   protected final MetricsBalancer metricsBalancer = new MetricsBalancer();
+  protected ServerName masterServerName;
   protected MasterServices services;
 
   @Override
@@ -369,6 +423,52 @@ public abstract class BaseLoadBalancer i
     this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2);
   }
 
+  /**
+   * Balance the regions that should be on master regionserver.
+   */
+  protected List<RegionPlan> balanceMasterRegions(
+      Map<ServerName, List<HRegionInfo>> clusterMap) {
+    if (services == null || clusterMap.size() <= 1) return null;
+    List<RegionPlan> plans = null;
+    List<HRegionInfo> regions = clusterMap.get(masterServerName);
+    if (regions != null) {
+      Iterator<ServerName> keyIt = null;
+      for (HRegionInfo region: regions) {
+        if (shouldBeOnMaster(region)) continue;
+
+        // Find a non-master regionserver to host the region
+        if (keyIt == null || !keyIt.hasNext()) {
+          keyIt = clusterMap.keySet().iterator();
+        }
+        ServerName dest = keyIt.next();
+        if (masterServerName.equals(dest)) {
+          dest = keyIt.next();
+        }
+
+        // Move this region away from the master regionserver
+        RegionPlan plan = new RegionPlan(region, masterServerName, dest);
+        if (plans == null) {
+          plans = new ArrayList<RegionPlan>();
+        }
+        plans.add(plan);
+      }
+    }
+    for (Map.Entry<ServerName, List<HRegionInfo>> server: clusterMap.entrySet()) {
+      if (masterServerName.equals(server.getKey())) continue;
+      for (HRegionInfo region: server.getValue()) {
+        if (!shouldBeOnMaster(region)) continue;
+
+        // Move this region to the master regionserver
+        RegionPlan plan = new RegionPlan(region, server.getKey(), masterServerName);
+        if (plans == null) {
+          plans = new ArrayList<RegionPlan>();
+        }
+        plans.add(plan);
+      }
+    }
+    return plans;
+  }
+
   @Override
   public Configuration getConf() {
     return this.config;
@@ -381,6 +481,7 @@ public abstract class BaseLoadBalancer i
 
   @Override
   public void setMasterServices(MasterServices masterServices) {
+    masterServerName = masterServices.getServerName();
     this.services = masterServices;
   }
 
@@ -438,19 +539,43 @@ public abstract class BaseLoadBalancer i
       return null;
     }
     Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<ServerName, List<HRegionInfo>>();
-    int numRegions = regions.size();
     int numServers = servers.size();
-    int max = (int) Math.ceil((float) numRegions / numServers);
-    int serverIdx = 0;
-    if (numServers > 1) {
-      serverIdx = RANDOM.nextInt(numServers);
+    if (numServers == 1) { // Only one server, nothing fancy we can do here
+      assignments.put(servers.get(0), new ArrayList<HRegionInfo>(regions));
+      return assignments;
     }
+
+    int numRegions = regions.size();
+    // Master regionserver is in the server list.
+    boolean masterIncluded = servers.contains(masterServerName);
+    int skipServers = numServers;
+    if (masterIncluded) {
+      skipServers--;
+    }
+    int max = (int) Math.ceil((float) numRegions / skipServers);
+    int serverIdx = RANDOM.nextInt(numServers);
     int regionIdx = 0;
     for (int j = 0; j < numServers; j++) {
       ServerName server = servers.get((j + serverIdx) % numServers);
+      if (server.equals(masterServerName)) {
+        // Don't put a non-special region on the master regionserver,
+        // so that it is not overloaded.
+        continue;
+      }
       List<HRegionInfo> serverRegions = new ArrayList<HRegionInfo>(max);
-      for (int i = regionIdx; i < numRegions; i += numServers) {
-        serverRegions.add(regions.get(i % numRegions));
+      for (int i = regionIdx; i < numRegions; i += skipServers) {
+        HRegionInfo region = regions.get(i % numRegions);
+        if (!(masterIncluded && shouldBeOnMaster(region))) {
+          serverRegions.add(region);
+          continue;
+        }
+        // Master is in the target list and this is a special region
+        List<HRegionInfo> masterRegions = assignments.get(masterServerName);
+        if (masterRegions == null) {
+          masterRegions = new ArrayList<HRegionInfo>(max);
+          assignments.put(masterServerName, masterRegions);
+        }
+        masterRegions.add(region);
       }
       assignments.put(server, serverRegions);
       regionIdx++;
@@ -498,7 +623,18 @@ public abstract class BaseLoadBalancer i
       LOG.warn("Wanted to do random assignment but no servers to assign to");
       return null;
     }
-    return servers.get(RANDOM.nextInt(servers.size()));
+    int numServers = servers.size();
+    if (numServers == 1) return servers.get(0);
+    if (shouldBeOnMaster(regionInfo) && servers.contains(masterServerName)) {
+      return masterServerName;
+    }
+    int i = RANDOM.nextInt(numServers);
+    ServerName sn = servers.get(i);
+    if (sn.equals(masterServerName)) {
+      i = (i == 0 ? 1 : i - 1);
+      sn = servers.get(i);
+    }
+    return sn;
   }
 
   /**
@@ -524,6 +660,16 @@ public abstract class BaseLoadBalancer i
     // Update metrics
     metricsBalancer.incrMiscInvocations();
 
+    if (regions.isEmpty() || servers.isEmpty()) {
+      return null;
+    }
+    Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<ServerName, List<HRegionInfo>>();
+    int numServers = servers.size();
+    if (numServers == 1) { // Only one server, nothing fancy we can do here
+      assignments.put(servers.get(0), new ArrayList<HRegionInfo>(regions.keySet()));
+      return assignments;
+    }
+
     // Group all of the old assignments by their hostname.
     // We can't group directly by ServerName since the servers all have
     // new start-codes.
@@ -532,12 +678,11 @@ public abstract class BaseLoadBalancer i
     // servers on the same host on different ports.
     ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
     for (ServerName server : servers) {
-      serversByHostname.put(server.getHostname(), server);
+      if (!server.equals(masterServerName)) {
+        serversByHostname.put(server.getHostname(), server);
+      }
     }
 
-    // Now come up with new assignments
-    Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<ServerName, List<HRegionInfo>>();
-
     for (ServerName server : servers) {
       assignments.put(server, new ArrayList<HRegionInfo>());
     }
@@ -547,6 +692,9 @@ public abstract class BaseLoadBalancer i
     // after the cluster restart.
     Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();
 
+    // Master regionserver is in the server list.
+    boolean masterIncluded = servers.contains(masterServerName);
+
     int numRandomAssignments = 0;
     int numRetainedAssigments = 0;
     for (Map.Entry<HRegionInfo, ServerName> entry : regions.entrySet()) {
@@ -556,10 +704,22 @@ public abstract class BaseLoadBalancer i
       if (oldServerName != null) {
         localServers = serversByHostname.get(oldServerName.getHostname());
       }
-      if (localServers.isEmpty()) {
+      if (masterIncluded && shouldBeOnMaster(region)) {
+        assignments.get(masterServerName).add(region);
+        if (localServers.contains(masterServerName)) {
+          numRetainedAssigments++;
+        } else {
+          numRandomAssignments++;
+        }
+      } else if (localServers.isEmpty()) {
         // No servers on the new cluster match up with this hostname,
         // assign randomly.
-        ServerName randomServer = servers.get(RANDOM.nextInt(servers.size()));
+        int i = RANDOM.nextInt(numServers);
+        ServerName randomServer = servers.get(i);
+        if (randomServer.equals(masterServerName)) {
+          i = (i == 0 ? 1 : i - 1);
+          randomServer = servers.get(i);
+        }
         assignments.get(randomServer).add(region);
         numRandomAssignments++;
         if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname());

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java Tue Mar 25 19:34:52 2014
@@ -35,20 +35,29 @@ public class ClusterLoadState {
   private int numRegions = 0;
   private int numServers = 0;
 
-  public ClusterLoadState(Map<ServerName, List<HRegionInfo>> clusterState) {
-    super();
+  public ClusterLoadState(ServerName master,
+      Map<ServerName, List<HRegionInfo>> clusterState) {
     this.numRegions = 0;
     this.numServers = clusterState.size();
     this.clusterState = clusterState;
     serversByLoad = new TreeMap<ServerAndLoad, List<HRegionInfo>>();
     // Iterate so we can count regions as we build the map
     for (Map.Entry<ServerName, List<HRegionInfo>> server : clusterState.entrySet()) {
+      if (master != null && numServers > 1 && master.equals(server.getKey())) {
+        // Don't count the master regionserver since its
+        // load is meant to be low.
+        continue;
+      }
       List<HRegionInfo> regions = server.getValue();
       int sz = regions.size();
       if (sz == 0) emptyRegionServerPresent = true;
       numRegions += sz;
       serversByLoad.put(new ServerAndLoad(server.getKey(), sz), regions);
     }
+    if (master != null && numServers > 1
+        && clusterState.containsKey(master)) {
+      numServers--;
+    }
   }
 
   Map<ServerName, List<HRegionInfo>> getClusterState() {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java Tue Mar 25 19:34:52 2014
@@ -180,10 +180,14 @@ public class SimpleLoadBalancer extends 
    */
   public List<RegionPlan> balanceCluster(
       Map<ServerName, List<HRegionInfo>> clusterMap) {
+    List<RegionPlan> regionsToReturn = balanceMasterRegions(clusterMap);
+    if (regionsToReturn != null) {
+      return regionsToReturn;
+    }
     boolean emptyRegionServerPresent = false;
     long startTime = System.currentTimeMillis();
 
-    ClusterLoadState cs = new ClusterLoadState(clusterMap);
+    ClusterLoadState cs = new ClusterLoadState(masterServerName, clusterMap);
 
     if (!this.needsBalance(cs)) return null;
     
@@ -204,7 +208,7 @@ public class SimpleLoadBalancer extends 
     // TODO: Look at data block locality or a more complex load to do this
     MinMaxPriorityQueue<RegionPlan> regionsToMove =
       MinMaxPriorityQueue.orderedBy(rpComparator).create();
-    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();
+    regionsToReturn = new ArrayList<RegionPlan>();
 
     // Walk down most loaded, pruning each to the max
     int serversOverloaded = 0;
@@ -233,8 +237,9 @@ public class SimpleLoadBalancer extends 
           hri = regions.get(regions.size() - 1 - i);
         }
         i++;
-        // Don't rebalance meta regions.
-        if (hri.isMetaRegion()) continue;
+        // Don't rebalance special regions.
+        if (shouldBeOnMaster(hri)
+            && masterServerName.equals(sal.getServerName())) continue;
         regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
         numTaken++;
         if (numTaken >= numToOffload) break;



Mime
View raw message