hbase-commits mailing list archives

From: chia7...@apache.org
Subject: [17/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base
Date: Thu, 28 Sep 2017 12:30:47 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index e5f1848..2fbbc3f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -30,11 +30,9 @@ import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -46,6 +44,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
@@ -54,6 +53,12 @@ import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.security.token.Token;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -70,7 +75,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
@@ -81,6 +85,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -97,12 +102,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
@@ -115,17 +117,19 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormaliz
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
@@ -140,10 +144,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.security.token.Token;
 
 /**
  * Helper utility to build protocol buffer requests,
@@ -886,10 +886,10 @@ public final class RequestConverter {
   * @return a protocol buffer OpenRegionRequest
   */
  public static OpenRegionRequest
-     buildOpenRegionRequest(ServerName server, final List<Pair<HRegionInfo,
+     buildOpenRegionRequest(ServerName server, final List<Pair<RegionInfo,
          List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
    OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
-   for (Pair<HRegionInfo, List<ServerName>> regionOpenInfo: regionOpenInfos) {
+   for (Pair<RegionInfo, List<ServerName>> regionOpenInfo: regionOpenInfos) {
      builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(),
        regionOpenInfo.getSecond(), openForReplay));
    }
@@ -911,7 +911,7 @@ public final class RequestConverter {
   * @return a protocol buffer OpenRegionRequest
   */
  public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
-     final HRegionInfo region, List<ServerName> favoredNodes,
+     final RegionInfo region, List<ServerName> favoredNodes,
      Boolean openForReplay) {
    OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
    builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes,
@@ -929,12 +929,12 @@ public final class RequestConverter {
   * @return a protocol buffer UpdateFavoredNodesRequest
   */
  public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
-     final List<Pair<HRegionInfo, List<ServerName>>> updateRegionInfos) {
+     final List<Pair<RegionInfo, List<ServerName>>> updateRegionInfos) {
    UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
    if (updateRegionInfos != null && !updateRegionInfos.isEmpty()) {
      RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
-    for (Pair<HRegionInfo, List<ServerName>> pair : updateRegionInfos) {
-      builder.setRegion(HRegionInfo.convert(pair.getFirst()));
+    for (Pair<RegionInfo, List<ServerName>> pair : updateRegionInfos) {
+      builder.setRegion(ProtobufUtil.toRegionInfo(pair.getFirst()));
       for (ServerName server : pair.getSecond()) {
         builder.addFavoredNodes(ProtobufUtil.toServerName(server));
       }
@@ -950,9 +950,9 @@ public final class RequestConverter {
    *
    *  @param regionInfo Region we are warming up
    */
-  public static WarmupRegionRequest buildWarmupRegionRequest(final HRegionInfo regionInfo) {
+  public static WarmupRegionRequest buildWarmupRegionRequest(final RegionInfo regionInfo) {
     WarmupRegionRequest.Builder builder = WarmupRegionRequest.newBuilder();
-    builder.setRegionInfo(HRegionInfo.convert(regionInfo));
+    builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));
     return builder.build();
   }
 
@@ -1184,11 +1184,11 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  public static SplitTableRegionRequest buildSplitTableRegionRequest(final HRegionInfo regionInfo,
+  public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionInfo regionInfo,
       final byte[] splitRow, final long nonceGroup, final long nonce)
       throws DeserializationException {
     SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder();
-    builder.setRegionInfo(HRegionInfo.convert(regionInfo));
+    builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));
     if (splitRow != null) {
       builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitRow));
     }
@@ -1611,10 +1611,10 @@ public final class RequestConverter {
    * Create a RegionOpenInfo based on given region info and version of offline node
    */
   public static RegionOpenInfo buildRegionOpenInfo(
-      final HRegionInfo region,
+      final RegionInfo region,
       final List<ServerName> favoredNodes, Boolean openForReplay) {
     RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder();
-    builder.setRegion(HRegionInfo.convert(region));
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
     if (favoredNodes != null) {
       for (ServerName server : favoredNodes) {
         builder.addFavoredNodes(ProtobufUtil.toServerName(server));

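The substantive change in RequestConverter is mechanical: every HRegionInfo parameter becomes the RegionInfo interface, and protobuf conversion moves from the static HRegionInfo.convert(...) to ProtobufUtil.toRegionInfo(...); the remaining hunks only reorder imports. A minimal sketch of the new conversion path, assuming the HBase 2.0 shaded client API shown in the hunks above:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

    public class RegionInfoConversionSketch {
      public static void main(String[] args) {
        // RegionInfoBuilder replaces the old HRegionInfo constructors.
        RegionInfo region =
            RegionInfoBuilder.newBuilder(TableName.valueOf("example_table")).build();

        // Interface -> protobuf, where HRegionInfo.convert(region) used to be called.
        HBaseProtos.RegionInfo proto = ProtobufUtil.toRegionInfo(region);

        // Protobuf -> interface; the ProtobufUtil overload resolves by argument type.
        RegionInfo roundTripped = ProtobufUtil.toRegionInfo(proto);
        System.out.println(RegionInfo.COMPARATOR.compare(region, roundTripped)); // prints 0
      }
    }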
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 93f74d3..611eaae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -28,16 +28,19 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.SingleResponse;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
@@ -55,9 +58,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCata
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.util.StringUtils;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
 
@@ -237,7 +237,7 @@ public final class ResponseConverter {
    * @param proto the GetOnlineRegionResponse
    * @return the list of region info
    */
-  public static List<HRegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) {
+  public static List<RegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) {
     if (proto == null || proto.getRegionInfoCount() == 0) return null;
     return ProtobufUtil.getRegionInfos(proto);
   }
@@ -280,10 +280,10 @@ public final class ResponseConverter {
    * @return the response
    */
   public static GetOnlineRegionResponse buildGetOnlineRegionResponse(
-      final List<HRegionInfo> regions) {
+      final List<RegionInfo> regions) {
     GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder();
-    for (HRegionInfo region: regions) {
-      builder.addRegionInfo(HRegionInfo.convert(region));
+    for (RegionInfo region: regions) {
+      builder.addRegionInfo(ProtobufUtil.toRegionInfo(region));
     }
     return builder.build();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index dada632..cddde2f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.zookeeper;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-
 import java.io.EOFException;
 import java.io.IOException;
 import java.net.ConnectException;
@@ -27,19 +25,19 @@ import java.net.SocketException;
 import java.net.SocketTimeoutException;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -47,17 +45,20 @@ import org.apache.hadoop.hbase.ipc.FailedServerException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * Utility class to perform operation (get/wait for/verify/set/delete) on znode in ZooKeeper
@@ -93,8 +94,8 @@ public class MetaTableLocator {
    * @param zkw ZooKeeper watcher to be used
    * @return meta table regions and their locations.
    */
-  public List<Pair<HRegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw) {
-    return getMetaRegionsAndLocations(zkw, HRegionInfo.DEFAULT_REPLICA_ID);
+  public List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw) {
+    return getMetaRegionsAndLocations(zkw, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
@@ -103,12 +104,12 @@ public class MetaTableLocator {
    * @param replicaId
    * @return meta table regions and their locations.
    */
-  public List<Pair<HRegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw,
+  public List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw,
       int replicaId) {
     ServerName serverName = getMetaRegionLocation(zkw, replicaId);
-    List<Pair<HRegionInfo, ServerName>> list = new ArrayList<>(1);
+    List<Pair<RegionInfo, ServerName>> list = new ArrayList<>(1);
     list.add(new Pair<>(RegionReplicaUtil.getRegionInfoForReplica(
-        HRegionInfo.FIRST_META_REGIONINFO, replicaId), serverName));
+        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), serverName));
     return list;
   }
 
@@ -116,8 +117,8 @@ public class MetaTableLocator {
    * @param zkw ZooKeeper watcher to be used
    * @return List of meta regions
    */
-  public List<HRegionInfo> getMetaRegions(ZooKeeperWatcher zkw) {
-    return getMetaRegions(zkw, HRegionInfo.DEFAULT_REPLICA_ID);
+  public List<RegionInfo> getMetaRegions(ZooKeeperWatcher zkw) {
+    return getMetaRegions(zkw, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
@@ -126,17 +127,17 @@ public class MetaTableLocator {
    * @param replicaId
    * @return List of meta regions
    */
-  public List<HRegionInfo> getMetaRegions(ZooKeeperWatcher zkw, int replicaId) {
-    List<Pair<HRegionInfo, ServerName>> result;
+  public List<RegionInfo> getMetaRegions(ZooKeeperWatcher zkw, int replicaId) {
+    List<Pair<RegionInfo, ServerName>> result;
     result = getMetaRegionsAndLocations(zkw, replicaId);
-    return getListOfHRegionInfos(result);
+    return getListOfRegionInfos(result);
   }
 
-  private List<HRegionInfo> getListOfHRegionInfos(
-      final List<Pair<HRegionInfo, ServerName>> pairs) {
-    if (pairs == null || pairs.isEmpty()) return null;
-    List<HRegionInfo> result = new ArrayList<>(pairs.size());
-    for (Pair<HRegionInfo, ServerName> pair: pairs) {
+  private List<RegionInfo> getListOfRegionInfos(
+      final List<Pair<RegionInfo, ServerName>> pairs) {
+    if (pairs == null || pairs.isEmpty()) return Collections.EMPTY_LIST;
+    List<RegionInfo> result = new ArrayList<>(pairs.size());
+    for (Pair<RegionInfo, ServerName> pair: pairs) {
       result.add(pair.getFirst());
     }
     return result;
@@ -185,7 +186,7 @@ public class MetaTableLocator {
    */
   public ServerName waitMetaRegionLocation(ZooKeeperWatcher zkw, long timeout)
   throws InterruptedException, NotAllMetaRegionsOnlineException {
-    return waitMetaRegionLocation(zkw, HRegionInfo.DEFAULT_REPLICA_ID, timeout);
+    return waitMetaRegionLocation(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
   }
 
   /**
@@ -261,7 +262,7 @@ public class MetaTableLocator {
   public boolean verifyMetaRegionLocation(ClusterConnection hConnection,
       ZooKeeperWatcher zkw, final long timeout)
   throws InterruptedException, IOException {
-    return verifyMetaRegionLocation(hConnection, zkw, timeout, HRegionInfo.DEFAULT_REPLICA_ID);
+    return verifyMetaRegionLocation(hConnection, zkw, timeout, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
@@ -291,7 +292,7 @@ public class MetaTableLocator {
     }
     return (service != null) && verifyRegionLocation(connection, service,
             getMetaRegionLocation(zkw, replicaId), RegionReplicaUtil.getRegionInfoForReplica(
-                HRegionInfo.FIRST_META_REGIONINFO, replicaId).getRegionName());
+                RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId).getRegionName());
   }
 
   /**
@@ -425,7 +426,7 @@ public class MetaTableLocator {
    */
   public static void setMetaLocation(ZooKeeperWatcher zookeeper,
       ServerName serverName, RegionState.State state) throws KeeperException {
-    setMetaLocation(zookeeper, serverName, HRegionInfo.DEFAULT_REPLICA_ID, state);
+    setMetaLocation(zookeeper, serverName, RegionInfo.DEFAULT_REPLICA_ID, state);
   }
 
   /**
@@ -456,7 +457,7 @@ public class MetaTableLocator {
       ZKUtil.setData(zookeeper,
           zookeeper.znodePaths.getZNodeForReplica(replicaId), data);
     } catch(KeeperException.NoNodeException nne) {
-      if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
         LOG.debug("META region location doesn't exist, create it");
       } else {
         LOG.debug("META region location doesn't exist for replicaId=" + replicaId +
@@ -470,7 +471,7 @@ public class MetaTableLocator {
    * Load the meta region state from the meta server ZNode.
    */
   public static RegionState getMetaRegionState(ZooKeeperWatcher zkw) throws KeeperException {
-    return getMetaRegionState(zkw, HRegionInfo.DEFAULT_REPLICA_ID);
+    return getMetaRegionState(zkw, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
@@ -514,7 +515,7 @@ public class MetaTableLocator {
       state = RegionState.State.OFFLINE;
     }
     return new RegionState(
-        RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, replicaId),
+        RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
       state, serverName);
   }
 
@@ -525,12 +526,12 @@ public class MetaTableLocator {
    */
   public void deleteMetaLocation(ZooKeeperWatcher zookeeper)
   throws KeeperException {
-    deleteMetaLocation(zookeeper, HRegionInfo.DEFAULT_REPLICA_ID);
+    deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   public void deleteMetaLocation(ZooKeeperWatcher zookeeper, int replicaId)
   throws KeeperException {
-    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
+    if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
       LOG.info("Deleting hbase:meta region location in ZooKeeper");
     } else {
       LOG.info("Deleting hbase:meta for " + replicaId + " region location in ZooKeeper");
@@ -586,7 +587,7 @@ public class MetaTableLocator {
   public ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
       final long timeout)
   throws InterruptedException {
-    return blockUntilAvailable(zkw, HRegionInfo.DEFAULT_REPLICA_ID, timeout);
+    return blockUntilAvailable(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
   }
 
   /**

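Two constant relocations run through MetaTableLocator: DEFAULT_REPLICA_ID now lives on the RegionInfo interface, while the canonical hbase:meta descriptor FIRST_META_REGIONINFO moves to RegionInfoBuilder. Note that the renamed getListOfRegionInfos also returns Collections.EMPTY_LIST instead of null for empty input, so callers no longer need a null check. A short sketch of the relocated constants, again assuming the 2.0 client API:

    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class MetaRegionConstantsSketch {
      public static void main(String[] args) {
        // Was HRegionInfo.DEFAULT_REPLICA_ID.
        int replicaId = RegionInfo.DEFAULT_REPLICA_ID;

        // Was HRegionInfo.FIRST_META_REGIONINFO.
        RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
            RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId);

        System.out.println(meta.getRegionNameAsString()); // hbase:meta,,1.1588230740
      }
    }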
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 471ed96..f5166e0 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -19,6 +19,9 @@
 
 package org.apache.hadoop.hbase.client;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -58,26 +61,23 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
+import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
+import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
+import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 import org.mockito.Mockito;
-import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 
 @Category({ClientTests.class, MediumTests.class})
 public class TestAsyncProcess {
@@ -106,9 +106,9 @@ public class TestAsyncProcess {
   private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
 
   // Replica stuff
-  private static final HRegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1),
-      hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-  private static final HRegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
+  private static final RegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
+  private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
+  private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
   private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
       new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
   private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
@@ -355,8 +355,8 @@ public class TestAsyncProcess {
     private Map<ServerName, Long> customPrimarySleepMs = new HashMap<>();
     private final AtomicLong replicaCalls = new AtomicLong(0);
 
-    public void addFailures(HRegionInfo... hris) {
-      for (HRegionInfo hri : hris) {
+    public void addFailures(RegionInfo... hris) {
+      for (RegionInfo hri : hris) {
         failures.add(hri.getRegionName());
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java
index d7589fb..51c8248 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.client.coprocessor;
 import static org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.getParsedGenericInstance;
 import static org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.validateArgAndGetPB;
 
-import com.google.protobuf.Message;
-
 import java.io.IOException;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -31,11 +29,10 @@ import java.util.TreeMap;
 import java.util.concurrent.CompletableFuture;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RawAsyncTable;
 import org.apache.hadoop.hbase.client.RawAsyncTable.CoprocessorCallback;
 import org.apache.hadoop.hbase.client.RawScanResultConsumer;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
@@ -44,6 +41,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRespo
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import com.google.protobuf.Message;
 
 /**
  * This client class is for invoking the aggregate functions deployed on the Region Server side via
@@ -73,7 +73,7 @@ public class AsyncAggregationClient {
     }
 
     @Override
-    public synchronized void onRegionError(HRegionInfo region, Throwable error) {
+    public synchronized void onRegionError(RegionInfo region, Throwable error) {
       completeExceptionally(error);
     }
 
@@ -82,11 +82,11 @@ public class AsyncAggregationClient {
       completeExceptionally(error);
     }
 
-    protected abstract void aggregate(HRegionInfo region, AggregateResponse resp)
+    protected abstract void aggregate(RegionInfo region, AggregateResponse resp)
         throws IOException;
 
     @Override
-    public synchronized void onRegionComplete(HRegionInfo region, AggregateResponse resp) {
+    public synchronized void onRegionComplete(RegionInfo region, AggregateResponse resp) {
       try {
         aggregate(region, resp);
       } catch (IOException e) {
@@ -135,7 +135,7 @@ public class AsyncAggregationClient {
       private R max;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         if (resp.getFirstPartCount() > 0) {
           R result = getCellValueFromProto(ci, resp, 0);
           if (max == null || (result != null && ci.compare(max, result) < 0)) {
@@ -171,7 +171,7 @@ public class AsyncAggregationClient {
       private R min;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         if (resp.getFirstPartCount() > 0) {
           R result = getCellValueFromProto(ci, resp, 0);
           if (min == null || (result != null && ci.compare(min, result) > 0)) {
@@ -208,7 +208,7 @@ public class AsyncAggregationClient {
       private long count;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         count += resp.getFirstPart(0).asReadOnlyByteBuffer().getLong();
       }
 
@@ -239,7 +239,7 @@ public class AsyncAggregationClient {
       private S sum;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         if (resp.getFirstPartCount() > 0) {
           S s = getPromotedValueFromProto(ci, resp, 0);
           sum = ci.add(sum, s);
@@ -276,7 +276,7 @@ public class AsyncAggregationClient {
       long count = 0L;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         if (resp.getFirstPartCount() > 0) {
           sum = ci.add(sum, getPromotedValueFromProto(ci, resp, 0));
           count += resp.getSecondPart().asReadOnlyByteBuffer().getLong();
@@ -315,7 +315,7 @@ public class AsyncAggregationClient {
       private long count;
 
       @Override
-      protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+      protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
         if (resp.getFirstPartCount() > 0) {
           sum = ci.add(sum, getPromotedValueFromProto(ci, resp, 0));
           sumSq = ci.add(sumSq, getPromotedValueFromProto(ci, resp, 1));
@@ -357,7 +357,7 @@ public class AsyncAggregationClient {
           private final NavigableMap<byte[], S> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 
           @Override
-          protected void aggregate(HRegionInfo region, AggregateResponse resp) throws IOException {
+          protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
             if (resp.getFirstPartCount() > 0) {
               map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex));
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java
index 9b8901e..5095752 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hadoop.hbase.coprocessor;
 
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import java.io.Closeable;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -39,12 +36,10 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -64,7 +59,6 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.util.ArrayUtils;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -77,6 +71,14 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
 
 /**
  * Export an HBase table. Writes content to sequence files up in HDFS. Use
@@ -179,7 +181,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces
   }
 
   private static SequenceFile.Writer.Option getOutputPath(final Configuration conf,
-          final HRegionInfo info, final ExportProtos.ExportRequest request) throws IOException {
+          final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException {
     Path file = new Path(request.getOutputPath(), "export-" + info.getEncodedName());
     FileSystem fs = file.getFileSystem(conf);
     if (fs.exists(file)) {
@@ -189,7 +191,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces
   }
 
   private static List<SequenceFile.Writer.Option> getWriterOptions(final Configuration conf,
-          final HRegionInfo info, final ExportProtos.ExportRequest request) throws IOException {
+          final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException {
     List<SequenceFile.Writer.Option> rval = new LinkedList<>();
     rval.add(SequenceFile.Writer.keyClass(ImmutableBytesWritable.class));
     rval.add(SequenceFile.Writer.valueClass(Result.class));
@@ -341,7 +343,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces
     }
   }
 
-  private Scan validateKey(final HRegionInfo region, final ExportProtos.ExportRequest request) throws IOException {
+  private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request) throws IOException {
     Scan scan = ProtobufUtil.toScan(request.getScan());
     byte[] regionStartKey = region.getStartKey();
     byte[] originStartKey = scan.getStartRow();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
index 257b075..46336d5 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
@@ -18,19 +18,25 @@
 
 package org.apache.hadoop.hbase.coprocessor.example;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -41,7 +47,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -51,13 +56,6 @@ import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
 @Category(MediumTests.class)
 public class TestRefreshHFilesEndpoint {
   private static final Log LOG = LogFactory.getLog(TestRefreshHFilesEndpoint.class);
@@ -139,7 +137,7 @@ public class TestRefreshHFilesEndpoint {
     HStoreWithFaultyRefreshHFilesAPI store;
 
     public HRegionForRefreshHFilesEP(final Path tableDir, final WAL wal, final FileSystem fs,
-                                     final Configuration confParam, final HRegionInfo regionInfo,
+                                     final Configuration confParam, final RegionInfo regionInfo,
                                      final TableDescriptor htd, final RegionServerServices rsServices) {
       super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index b5306f2..8285054 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -18,11 +18,17 @@
 
 package org.apache.hadoop.hbase.mapreduce;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
@@ -34,13 +40,9 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.yetus.audience.InterfaceAudience;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.List;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
 /**
  * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job
@@ -120,10 +122,18 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
       delegate.readFields(in);
     }
 
+    /**
+     * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+     *             Use {@link #getRegion()}
+     */
+    @Deprecated
     public HRegionInfo getRegionInfo() {
       return delegate.getRegionInfo();
     }
 
+    public RegionInfo getRegion() {
+      return delegate.getRegionInfo();
+    }
   }
 
   @VisibleForTesting

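TableSnapshotInputFormat keeps source compatibility rather than swapping types outright: getRegionInfo() is deprecated in place and a parallel getRegion() returning the interface is added, both backed by the same delegate call (legal because HRegionInfo implements RegionInfo in 2.0). A generic sketch of this deprecation-shim pattern; all names below are illustrative, not HBase API:

    // Illustrative only: NewType stands in for RegionInfo, OldType for HRegionInfo.
    interface NewType {
      String describe();
    }

    @Deprecated
    class OldType implements NewType {
      @Override
      public String describe() { return "legacy concrete type"; }
    }

    class SplitLike {
      private final OldType delegate = new OldType();

      /** @deprecated As of 2.0.0, to be removed in 3.0.0. Use {@link #getRegion()}. */
      @Deprecated
      public OldType getRegionInfo() { return delegate; }

      /** Preferred accessor: the same object, typed as the interface. */
      public NewType getRegion() { return delegate; }
    }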
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index a94f50e..aec5fa0 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
@@ -113,7 +113,7 @@ public class CompactionTool extends Configured implements Tool {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
         TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+        RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
         compactStoreFiles(tableDir, htd, hri,
             path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
@@ -139,7 +139,7 @@ public class CompactionTool extends Configured implements Tool {
     private void compactRegion(final Path tableDir, final TableDescriptor htd,
         final Path regionDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+      RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
       for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
         compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
       }
@@ -151,7 +151,7 @@ public class CompactionTool extends Configured implements Tool {
      * no more compactions are needed. Uses the Configuration settings provided.
      */
     private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
-        final HRegionInfo hri, final String familyName, final boolean compactOnce,
+        final RegionInfo hri, final String familyName, final boolean compactOnce,
         final boolean major) throws IOException {
       HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
       LOG.info("Compact table=" + htd.getTableName() +
@@ -183,7 +183,7 @@ public class CompactionTool extends Configured implements Tool {
      * the store dir to compact as source.
      */
     private static HStore getStore(final Configuration conf, final FileSystem fs,
-        final Path tableDir, final TableDescriptor htd, final HRegionInfo hri,
+        final Path tableDir, final TableDescriptor htd, final RegionInfo hri,
         final String familyName, final Path tempDir) throws IOException {
       HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
         @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index e8ed727..5dd9bea 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.hbase.snapshot;
 
 import java.io.BufferedInputStream;
-import java.io.FileNotFoundException;
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -34,7 +34,6 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -44,18 +43,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.WALLink;
+import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -64,18 +61,22 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 
 /**
  * Export the specified snapshot to a given FileSystem.
@@ -566,7 +567,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
     SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
       new SnapshotReferenceUtil.SnapshotVisitor() {
         @Override
-        public void storeFile(final HRegionInfo regionInfo, final String family,
+        public void storeFile(final RegionInfo regionInfo, final String family,
             final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
           // for storeFile.hasReference() case, copied as part of the manifest
           if (!storeFile.hasReference()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index b581e04..b2b9c4d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -40,11 +40,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
@@ -53,6 +52,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -65,13 +65,13 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.Import.KeyValueImporter;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.mapreduce.Mapper.Context;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ToolRunner;
@@ -739,7 +739,7 @@ public class TestImportExport {
       Table importTable = UTIL.createTable(TableName.valueOf(importTableName), FAMILYA, 3);
 
       // Register the wal listener for the import table
-      HRegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
+      RegionInfo region = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
           .getRegions(importTable.getName()).get(0).getRegionInfo();
       TableWALActionListener walListener = new TableWALActionListener(region);
       WAL wal = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region);
@@ -773,15 +773,15 @@ public class TestImportExport {
   }
 
   /**
-   * This listens to the {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} to
+   * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to
    * identify that an entry is written to the Write Ahead Log for the given table.
    */
   private static class TableWALActionListener extends WALActionsListener.Base {
 
-    private HRegionInfo regionInfo;
+    private RegionInfo regionInfo;
     private boolean isVisited = false;
 
-    public TableWALActionListener(HRegionInfo region) {
+    public TableWALActionListener(RegionInfo region) {
       this.regionInfo = region;
     }
 

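For context, the listener above only records the region it watches and flips isVisited when a matching WAL entry is written; a sketch of the override that the Javadoc points at (the table-name comparison is an assumed implementation detail, not shown in this hunk):

    @Override
    public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {
      // RegionInfo.getTable() is enough here; the listener only cares
      // whether the entry belongs to the watched table.
      if (info.getTable().equals(regionInfo.getTable())) {
        isVisited = true;
      }
    }
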
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 192b85d..6105a0d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -18,7 +18,11 @@
 
 package org.apache.hadoop.hbase.replication;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +41,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
@@ -48,6 +51,8 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -56,11 +61,9 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSource;
 import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -70,6 +73,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.mapreduce.Job;
 import org.junit.Before;
@@ -79,6 +83,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
 @Category({ReplicationTests.class, LargeTests.class})
 public class TestReplicationSmallTests extends TestReplicationBase {
@@ -753,8 +758,10 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   public void testCompactionWALEdits() throws Exception {
     WALProtos.CompactionDescriptor compactionDescriptor =
         WALProtos.CompactionDescriptor.getDefaultInstance();
-    HRegionInfo hri = new HRegionInfo(htable1.getName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(htable1.getName())
+        .setStartKey(HConstants.EMPTY_START_ROW)
+        .setEndKey(HConstants.EMPTY_END_ROW)
+        .build();
     WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
     Replication.scopeWALEdits(new WALKey(), edit,
       htable1.getConfiguration(), null);
@@ -822,7 +829,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     final TableName tableName = htable1.getName();
 
     HRegion region = utility1.getMiniHBaseCluster().getRegions(tableName).get(0);
-    HRegionInfo hri = region.getRegionInfo();
+    RegionInfo hri = region.getRegionInfo();
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     for (byte[] fam : htable1.getTableDescriptor().getFamiliesKeys()) {
       scopes.put(fam, 1);
@@ -989,7 +996,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     final List<Path> emptyWalPaths = new ArrayList<>();
     long ts = System.currentTimeMillis();
     for (int i = 0; i < numRs; i++) {
-      HRegionInfo regionInfo =
+      RegionInfo regionInfo =
           utility1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo();
       WAL wal = utility1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo);
       Path currentWalPath = AbstractFSWALProvider.getCurrentFileName(wal);
@@ -1012,7 +1019,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
 
     // roll the original wal, which enqueues a new wal behind our empty wal
     for (int i = 0; i < numRs; i++) {
-      HRegionInfo regionInfo =
+      RegionInfo regionInfo =
           utility1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo();
       WAL wal = utility1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo);
       wal.rollWriter(true);

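The testCompactionWALEdits hunk is the clearest before/after picture of the migration this commit performs: the deprecated HRegionInfo constructor gives way to the RegionInfoBuilder fluent API, which yields an immutable RegionInfo. Side by side, as alternatives (the note about builder defaults is an assumption, not verified against this commit):

    // Before: concrete class, positional constructor.
    HRegionInfo hri = new HRegionInfo(htable1.getName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

    // After: fluent builder producing an immutable RegionInfo. The empty
    // start/end keys are presumably the builder's defaults anyway, so the
    // two setters are kept mainly for symmetry with the old constructor.
    RegionInfo hri = RegionInfoBuilder.newBuilder(htable1.getName())
        .setStartKey(HConstants.EMPTY_START_ROW)
        .setEndKey(HConstants.EMPTY_END_ROW)
        .build();
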
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index 2e3cb5e..7139968 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -39,12 +39,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,13 +51,16 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+
 /**
  * Test Export Snapshot Tool
  */
@@ -138,7 +139,7 @@ public class TestExportSnapshot {
   }
 
   protected interface RegionPredicate {
-    boolean evaluate(final HRegionInfo regionInfo);
+    boolean evaluate(final RegionInfo regionInfo);
   }
 
   protected RegionPredicate getBypassRegionPredicate() {
@@ -314,7 +315,7 @@ public class TestExportSnapshot {
     SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot,
           new SnapshotReferenceUtil.SnapshotVisitor() {
         @Override
-        public void storeFile(final HRegionInfo regionInfo, final String family,
+        public void storeFile(final RegionInfo regionInfo, final String family,
             final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
           if (bypassregionPredicate != null && bypassregionPredicate.evaluate(regionInfo))
             return;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java
index 7407a7d..3de54ff 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.snapshot;
 
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -57,7 +57,7 @@ public class TestMobExportSnapshot extends TestExportSnapshot {
   protected RegionPredicate getBypassRegionPredicate() {
     return new RegionPredicate() {
       @Override
-      public boolean evaluate(final HRegionInfo regionInfo) {
+      public boolean evaluate(final RegionInfo regionInfo) {
         return MobUtils.isMobRegionInfo(regionInfo);
       }
     };

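Since RegionPredicate declares a single evaluate(RegionInfo) method, the anonymous class above could be collapsed to a lambda; a sketch of that option (a stylistic alternative, not what this commit does):

    @Override
    protected RegionPredicate getBypassRegionPredicate() {
      // Valid as long as RegionPredicate keeps exactly one abstract method.
      return regionInfo -> MobUtils.isMobRegionInfo(regionInfo);
    }
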
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
index 0369b44..183262d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -19,30 +19,28 @@
 
 package org.apache.hadoop.hbase.rest;
 
-import java.io.IOException;
-import java.util.Map;
-
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
 import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.rest.model.TableInfoModel;
 import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class RegionsResource extends ResourceBase {
@@ -81,11 +79,11 @@ public class RegionsResource extends ResourceBase {
 
       Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration());
       @SuppressWarnings("deprecation")
-      Map<HRegionInfo, ServerName> regions = MetaTableAccessor
+      Map<RegionInfo, ServerName> regions = MetaTableAccessor
           .allTableRegions(connection, tableName);
       connection.close();
-      for (Map.Entry<HRegionInfo,ServerName> e: regions.entrySet()) {
-        HRegionInfo hri = e.getKey();
+      for (Map.Entry<RegionInfo,ServerName> e: regions.entrySet()) {
+        RegionInfo hri = e.getKey();
         ServerName addr = e.getValue();
         model.add(
           new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index cba5d3e..b13dafd 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -31,11 +31,10 @@ import java.util.Set;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.LoadBalancer;
@@ -48,6 +47,8 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
@@ -114,9 +115,9 @@ public class RSGroupAdminServer implements RSGroupAdmin {
   /**
    * @return List of Regions associated with this <code>server</code>.
    */
-  private List<HRegionInfo> getRegions(final Address server) {
-    LinkedList<HRegionInfo> regions = new LinkedList<>();
-    for (Map.Entry<HRegionInfo, ServerName> el :
+  private List<RegionInfo> getRegions(final Address server) {
+    LinkedList<RegionInfo> regions = new LinkedList<>();
+    for (Map.Entry<RegionInfo, ServerName> el :
         master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
       if (el.getValue() == null) continue;
       if (el.getValue().getAddress().equals(server)) {
@@ -131,7 +132,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     return regions;
   }
 
-  private void addRegion(final LinkedList<HRegionInfo> regions, HRegionInfo hri) {
+  private void addRegion(final LinkedList<RegionInfo> regions, RegionInfo hri) {
     // If meta, move it last otherwise other unassigns fail because meta is not
     // online for them to update state in. This is dodgy. Needs to be made more
     // robust. See TODO below.
@@ -206,8 +207,8 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       for (Iterator<Address> iter = allSevers.iterator(); iter.hasNext();) {
         Address rs = iter.next();
         // Get regions that are associated with this server and filter regions by tables.
-        List<HRegionInfo> regions = new ArrayList<>();
-        for (HRegionInfo region : getRegions(rs)) {
+        List<RegionInfo> regions = new ArrayList<>();
+        for (RegionInfo region : getRegions(rs)) {
           if (!tables.contains(region.getTable())) {
             regions.add(region);
           }
@@ -216,7 +217,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
         LOG.info("Unassigning " + regions.size() +
                 " region(s) from " + rs + " for server move to " + targetGroupName);
         if (!regions.isEmpty()) {
-          for (HRegionInfo region: regions) {
+          for (RegionInfo region: regions) {
             // Regions might get assigned from tables of target group so we need to filter
             if (!targetGrp.containsTable(region.getTable())) {
               this.master.getAssignmentManager().unassign(region);
@@ -259,7 +260,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
         } catch (InterruptedException e) {
           throw new IOException("Interrupted when waiting for table lock", e);
         }
-        for (HRegionInfo region :
+        for (RegionInfo region :
                 master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
           ServerName sn = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(region);
           if (!servers.contains(sn.getAddress())) {
@@ -334,7 +335,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
         for (Iterator<Address> iter = editableMovedServers.iterator(); iter.hasNext();) {
           Address rs = iter.next();
           // Get regions that are associated with this server.
-          List<HRegionInfo> regions = getRegions(rs);
+          List<RegionInfo> regions = getRegions(rs);
 
           // Unassign regions for a server
           // TODO: This is problematic especially if hbase:meta is in the mix.
@@ -345,7 +346,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
               " region(s) from " + rs + " for server move to " + targetGroupName);
           if (!regions.isEmpty()) {
             // TODO bulk unassign or throttled unassign?
-            for (HRegionInfo region: regions) {
+            for (RegionInfo region: regions) {
               // Regions might get assigned from tables of target group so we need to filter
               if (!targetGrp.containsTable(region.getTable())) {
                 this.master.getAssignmentManager().unassign(region);
@@ -426,7 +427,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
         } catch (InterruptedException e) {
           throw new IOException("Interrupted when waiting for table lock", e);
         }
-        for (HRegionInfo region :
+        for (RegionInfo region :
             master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
           master.getAssignmentManager().unassign(region);
         }
@@ -517,7 +518,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
 
       //We balance per group instead of per table
       List<RegionPlan> plans = new ArrayList<>();
-      for(Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap:
+      for(Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> tableMap:
           getRSGroupAssignmentsByTable(groupName).entrySet()) {
         LOG.info("Creating partial plan for table " + tableMap.getKey() + ": "
             + tableMap.getValue());
@@ -599,7 +600,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     Map<String, RegionState> rit = Maps.newTreeMap();
     AssignmentManager am = master.getAssignmentManager();
     for(TableName tableName : getRSGroupInfo(groupName).getTables()) {
-      for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
+      for(RegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
         RegionState state = am.getRegionStates().getRegionTransitionState(regionInfo);
         if(state != null) {
           rit.put(regionInfo.getEncodedName(), state);
@@ -609,16 +610,16 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     return rit;
   }
 
-  private Map<TableName, Map<ServerName, List<HRegionInfo>>>
+  private Map<TableName, Map<ServerName, List<RegionInfo>>>
       getRSGroupAssignmentsByTable(String groupName) throws IOException {
-    Map<TableName, Map<ServerName, List<HRegionInfo>>> result = Maps.newHashMap();
+    Map<TableName, Map<ServerName, List<RegionInfo>>> result = Maps.newHashMap();
     RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName);
-    Map<TableName, Map<ServerName, List<HRegionInfo>>> assignments = Maps.newHashMap();
-    for(Map.Entry<HRegionInfo, ServerName> entry:
+    Map<TableName, Map<ServerName, List<RegionInfo>>> assignments = Maps.newHashMap();
+    for(Map.Entry<RegionInfo, ServerName> entry:
         master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
       TableName currTable = entry.getKey().getTable();
       ServerName currServer = entry.getValue();
-      HRegionInfo currRegion = entry.getKey();
+      RegionInfo currRegion = entry.getKey();
       if (rsGroupInfo.getTables().contains(currTable)) {
         assignments.putIfAbsent(currTable, new HashMap<>());
         assignments.get(currTable).putIfAbsent(currServer, new ArrayList<>());
@@ -626,7 +627,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       }
     }
 
-    Map<ServerName, List<HRegionInfo>> serverMap = Maps.newHashMap();
+    Map<ServerName, List<RegionInfo>> serverMap = Maps.newHashMap();
     for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
       if(rsGroupInfo.getServers().contains(serverName.getAddress())) {
         serverMap.put(serverName, Collections.emptyList());

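One body this diff elides is addRegion(), whose comment explains the ordering trick: hbase:meta must be unassigned last so the other unassigns can still update their state in it. A sketch of what that comment implies, assuming RegionInfo.isMetaRegion() as the check (the actual implementation is not shown in this hunk):

    private void addRegion(final LinkedList<RegionInfo> regions, RegionInfo hri) {
      // Queue meta at the tail so every other region is unassigned while
      // meta is still online to record the state transitions.
      if (hri.isMetaRegion()) {
        regions.addLast(hri);
      } else {
        regions.addFirst(hri);
      }
    }
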
