hbase-commits mailing list archives

From chia7...@apache.org
Subject [08/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base
Date Thu, 28 Sep 2017 12:30:38 GMT
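
The hunks below apply one pattern throughout: the deprecated HRegionInfo class gives way to the RegionInfo interface, and construction moves to the RegionInfoBuilder fluent API. As a minimal standalone sketch of that before/after (the table name and keys here are illustrative, not taken from the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoMigrationSketch {
  public static void main(String[] args) {
    // Before this commit: new HRegionInfo(tableName, startKey, endKey)
    // After: an immutable RegionInfo built through the fluent builder.
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("example_table"))
        .setStartKey(Bytes.toBytes("aaa"))  // illustrative start key
        .setEndKey(Bytes.toBytes("zzz"))    // illustrative end key
        .build();
    System.out.println(region.getRegionNameAsString());
  }
}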
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index b4e5007..e942a02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.tool;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -72,13 +69,13 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -96,11 +93,14 @@ import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.client.ConnectStringParser;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 /**
 * HBase Canary Tool that can be used to do
  * "canary monitoring" of a running HBase cluster.
@@ -210,34 +210,34 @@ public final class Canary implements Tool {
     private Map<String, LongAdder> perTableReadLatency = new HashMap<>();
     private LongAdder writeLatency = new LongAdder();
 
-    public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e) {
+    public void publishReadFailure(ServerName serverName, RegionInfo region, Exception e) {
       incReadFailureCount();
       LOG.error(String.format("read from region %s on regionserver %s failed", region.getRegionNameAsString(), serverName), e);
     }
 
-    public void publishReadFailure(ServerName serverName, HRegionInfo region, ColumnFamilyDescriptor column, Exception e) {
+    public void publishReadFailure(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, Exception e) {
       incReadFailureCount();
       LOG.error(String.format("read from region %s on regionserver %s column family %s failed",
         region.getRegionNameAsString(), serverName, column.getNameAsString()), e);
     }
 
-    public void publishReadTiming(ServerName serverName, HRegionInfo region, ColumnFamilyDescriptor column, long msTime) {
+    public void publishReadTiming(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, long msTime) {
       LOG.info(String.format("read from region %s on regionserver %s column family %s in %dms",
         region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime));
     }
 
-    public void publishWriteFailure(ServerName serverName, HRegionInfo region, Exception e) {
+    public void publishWriteFailure(ServerName serverName, RegionInfo region, Exception e) {
       incWriteFailureCount();
       LOG.error(String.format("write to region %s on regionserver %s failed", region.getRegionNameAsString(), serverName), e);
     }
 
-    public void publishWriteFailure(ServerName serverName, HRegionInfo region, ColumnFamilyDescriptor column, Exception e) {
+    public void publishWriteFailure(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, Exception e) {
       incWriteFailureCount();
       LOG.error(String.format("write to region %s on regionserver %s column family %s failed",
         region.getRegionNameAsString(), serverName, column.getNameAsString()), e);
     }
 
-    public void publishWriteTiming(ServerName serverName, HRegionInfo region, ColumnFamilyDescriptor column, long msTime) {
+    public void publishWriteTiming(ServerName serverName, RegionInfo region, ColumnFamilyDescriptor column, long msTime) {
       LOG.info(String.format("write to region %s on regionserver %s column family %s in %dms",
         region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime));
     }
@@ -307,14 +307,14 @@ public final class Canary implements Tool {
       READ, WRITE
     }
     private Connection connection;
-    private HRegionInfo region;
+    private RegionInfo region;
     private RegionStdOutSink sink;
     private TaskType taskType;
     private boolean rawScanEnabled;
     private ServerName serverName;
     private LongAdder readWriteLatency;
 
-    RegionTask(Connection connection, HRegionInfo region, ServerName serverName, RegionStdOutSink sink,
+    RegionTask(Connection connection, RegionInfo region, ServerName serverName, RegionStdOutSink sink,
         TaskType taskType, boolean rawScanEnabled, LongAdder rwLatency) {
       this.connection = connection;
       this.region = region;
@@ -476,11 +476,11 @@ public final class Canary implements Tool {
   static class RegionServerTask implements Callable<Void> {
     private Connection connection;
     private String serverName;
-    private HRegionInfo region;
+    private RegionInfo region;
     private RegionServerStdOutSink sink;
     private AtomicLong successes;
 
-    RegionServerTask(Connection connection, String serverName, HRegionInfo region,
+    RegionServerTask(Connection connection, String serverName, RegionInfo region,
         RegionServerStdOutSink sink, AtomicLong successes) {
       this.connection = connection;
       this.serverName = serverName;
@@ -1188,7 +1188,7 @@ public final class Canary implements Tool {
         numberOfServers -= 1;
       }
 
-      List<Pair<HRegionInfo, ServerName>> pairs =
+      List<Pair<RegionInfo, ServerName>> pairs =
           MetaTableAccessor.getTableRegionsAndLocations(connection, writeTableName);
       int numberOfRegions = pairs.size();
       if (numberOfRegions < numberOfServers * regionsLowerLimit
@@ -1198,7 +1198,7 @@ public final class Canary implements Tool {
         createWriteTable(numberOfServers);
       }
       HashSet<ServerName> serverSet = new HashSet<>();
-      for (Pair<HRegionInfo, ServerName> pair : pairs) {
+      for (Pair<RegionInfo, ServerName> pair : pairs) {
         serverSet.add(pair.getSecond());
       }
       int numberOfCoveredServers = serverSet.size();
@@ -1273,7 +1273,7 @@ public final class Canary implements Tool {
       regionLocator = admin.getConnection().getRegionLocator(tableDesc.getTableName());
       for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
         ServerName rs = location.getServerName();
-        HRegionInfo region = location.getRegionInfo();
+        RegionInfo region = location.getRegionInfo();
         tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, taskType, rawScanEnabled,
           rwLatency));
       }
@@ -1376,7 +1376,7 @@ public final class Canary implements Tool {
           LOG.error("Run RegionServerMonitor failed!", e);
           this.errorCode = ERROR_EXIT_CODE;
         }
-        Map<String, List<HRegionInfo>> rsAndRMap = this.filterRegionServerByName();
+        Map<String, List<RegionInfo>> rsAndRMap = this.filterRegionServerByName();
         this.initialized = true;
         this.monitorRegionServers(rsAndRMap, regionServerSink);
       }
@@ -1416,18 +1416,18 @@ public final class Canary implements Tool {
       return foundTableNames.isEmpty();
     }
 
-    private void monitorRegionServers(Map<String, List<HRegionInfo>> rsAndRMap, RegionServerStdOutSink regionServerSink) {
+    private void monitorRegionServers(Map<String, List<RegionInfo>> rsAndRMap, RegionServerStdOutSink regionServerSink) {
       List<RegionServerTask> tasks = new ArrayList<>();
       Map<String, AtomicLong> successMap = new HashMap<>();
       Random rand = new Random();
-      for (Map.Entry<String, List<HRegionInfo>> entry : rsAndRMap.entrySet()) {
+      for (Map.Entry<String, List<RegionInfo>> entry : rsAndRMap.entrySet()) {
         String serverName = entry.getKey();
         AtomicLong successes = new AtomicLong(0);
         successMap.put(serverName, successes);
         if (entry.getValue().isEmpty()) {
           LOG.error(String.format("Regionserver not serving any regions - %s", serverName));
         } else if (this.allRegions) {
-          for (HRegionInfo region : entry.getValue()) {
+          for (RegionInfo region : entry.getValue()) {
             tasks.add(new RegionServerTask(this.connection,
                 serverName,
                 region,
@@ -1436,7 +1436,7 @@ public final class Canary implements Tool {
           }
         } else {
           // random select a region if flag not set
-          HRegionInfo region = entry.getValue().get(rand.nextInt(entry.getValue().size()));
+          RegionInfo region = entry.getValue().get(rand.nextInt(entry.getValue().size()));
           tasks.add(new RegionServerTask(this.connection,
               serverName,
               region,
@@ -1454,7 +1454,7 @@ public final class Canary implements Tool {
           }
         }
         if (this.allRegions) {
-          for (Map.Entry<String, List<HRegionInfo>> entry : rsAndRMap.entrySet()) {
+          for (Map.Entry<String, List<RegionInfo>> entry : rsAndRMap.entrySet()) {
             String serverName = entry.getKey();
             LOG.info("Successfully read " + successMap.get(serverName) + " regions out of "
                     + entry.getValue().size() + " on regionserver:" + serverName);
@@ -1466,14 +1466,14 @@ public final class Canary implements Tool {
       }
     }
 
-    private Map<String, List<HRegionInfo>> filterRegionServerByName() {
-      Map<String, List<HRegionInfo>> regionServerAndRegionsMap = this.getAllRegionServerByName();
+    private Map<String, List<RegionInfo>> filterRegionServerByName() {
+      Map<String, List<RegionInfo>> regionServerAndRegionsMap = this.getAllRegionServerByName();
       regionServerAndRegionsMap = this.doFilterRegionServerByName(regionServerAndRegionsMap);
       return regionServerAndRegionsMap;
     }
 
-    private Map<String, List<HRegionInfo>> getAllRegionServerByName() {
-      Map<String, List<HRegionInfo>> rsAndRMap = new HashMap<>();
+    private Map<String, List<RegionInfo>> getAllRegionServerByName() {
+      Map<String, List<RegionInfo>> rsAndRMap = new HashMap<>();
       Table table = null;
       RegionLocator regionLocator = null;
       try {
@@ -1481,7 +1481,7 @@ public final class Canary implements Tool {
           LOG.debug(String.format("reading list of tables and locations"));
         }
         HTableDescriptor[] tableDescs = this.admin.listTables();
-        List<HRegionInfo> regions = null;
+        List<RegionInfo> regions = null;
         for (HTableDescriptor tableDesc : tableDescs) {
           table = this.admin.getConnection().getTable(tableDesc.getTableName());
           regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName());
@@ -1489,7 +1489,7 @@ public final class Canary implements Tool {
           for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
             ServerName rs = location.getServerName();
             String rsName = rs.getHostname();
-            HRegionInfo r = location.getRegionInfo();
+            RegionInfo r = location.getRegionInfo();
 
             if (rsAndRMap.containsKey(rsName)) {
               regions = rsAndRMap.get(rsName);
@@ -1507,7 +1507,7 @@ public final class Canary implements Tool {
             .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers()) {
           String rsName = rs.getHostname();
           if (!rsAndRMap.containsKey(rsName)) {
-            rsAndRMap.put(rsName, Collections.<HRegionInfo> emptyList());
+            rsAndRMap.put(rsName, Collections.<RegionInfo> emptyList());
           }
         }
       } catch (IOException e) {
@@ -1527,10 +1527,10 @@ public final class Canary implements Tool {
       return rsAndRMap;
     }
 
-    private Map<String, List<HRegionInfo>> doFilterRegionServerByName(
-        Map<String, List<HRegionInfo>> fullRsAndRMap) {
+    private Map<String, List<RegionInfo>> doFilterRegionServerByName(
+        Map<String, List<RegionInfo>> fullRsAndRMap) {
 
-      Map<String, List<HRegionInfo>> filteredRsAndRMap = null;
+      Map<String, List<RegionInfo>> filteredRsAndRMap = null;
 
       if (this.targets != null && this.targets.length > 0) {
         filteredRsAndRMap = new HashMap<>();
@@ -1541,7 +1541,7 @@ public final class Canary implements Tool {
           if (this.useRegExp) {
             regExpFound = false;
             pattern = Pattern.compile(rsName);
-            for (Map.Entry<String, List<HRegionInfo>> entry : fullRsAndRMap.entrySet()) {
+            for (Map.Entry<String, List<RegionInfo>> entry : fullRsAndRMap.entrySet()) {
               matcher = pattern.matcher(entry.getKey());
               if (matcher.matches()) {
                 filteredRsAndRMap.put(entry.getKey(), entry.getValue());

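In Canary, the sink callbacks and task constructors now take RegionInfo, and region handles come from HRegionLocation exactly as the hunk above does. A minimal sketch of that lookup path, assuming a reachable cluster and a table named example_table (both assumptions, not part of the patch):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("example_table"))) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // After this commit, callers hold the RegionInfo interface type.
        RegionInfo region = location.getRegionInfo();
        System.out.println(region.getRegionNameAsString() + " on " + location.getServerName());
      }
    }
  }
}
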
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 0c95e7e..4afdcb9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,18 +17,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Ordering;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.TreeMultimap;
-
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -100,19 +88,23 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
@@ -120,8 +112,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -147,8 +137,20 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 import org.apache.zookeeper.KeeperException;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Ordering;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.TreeMultimap;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+
 /**
  * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
  * table integrity problems in a corrupted HBase.
@@ -833,11 +835,11 @@ public class HBaseFsck extends Configured implements Closeable {
   public void checkRegionBoundaries() {
     try {
       ByteArrayComparator comparator = new ByteArrayComparator();
-      List<HRegionInfo> regions = MetaTableAccessor.getAllRegions(connection, true);
+      List<RegionInfo> regions = MetaTableAccessor.getAllRegions(connection, true);
       final RegionBoundariesInformation currentRegionBoundariesInformation =
           new RegionBoundariesInformation();
       Path hbaseRoot = FSUtils.getRootDir(getConf());
-      for (HRegionInfo regionInfo : regions) {
+      for (RegionInfo regionInfo : regions) {
         Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
         currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
         // For each region, get the start and stop key from the META and compare them to the
@@ -1005,10 +1007,12 @@ public class HBaseFsck extends Configured implements Closeable {
         Bytes.toString(orphanRegionRange.getSecond()) + ")");
 
     // create new region on hdfs. move data into place.
-    HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(),
-        Bytes.add(orphanRegionRange.getSecond(), new byte[1]));
-    LOG.info("Creating new region : " + hri);
-    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
+    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(template.getTableName())
+        .setStartKey(orphanRegionRange.getFirst())
+        .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1]))
+        .build();
+    LOG.info("Creating new region : " + regionInfo);
+    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), regionInfo, template);
     Path target = region.getRegionFileSystem().getRegionDir();
 
     // rename all the data to new region
@@ -1232,7 +1236,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private void loadHdfsRegioninfo(HbckInfo hbi) throws IOException {
     Path regionDir = hbi.getHdfsRegionDir();
     if (regionDir == null) {
-      if (hbi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (hbi.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
         // Log warning only for default/ primary replica with no region dir
         LOG.warn("No HDFS region dir found: " + hbi + " meta=" + hbi.metaEntry);
       }
@@ -1245,8 +1249,8 @@ public class HBaseFsck extends Configured implements Closeable {
     }
 
     FileSystem fs = FileSystem.get(getConf());
-    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-    LOG.debug("HRegionInfo read: " + hri.toString());
+    RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+    LOG.debug("RegionInfo read: " + hri.toString());
     hbi.hdfsEntry.hri = hri;
   }
 
@@ -1468,7 +1472,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private HRegion createNewMeta(String walFactoryID) throws IOException {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
-    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
+    RegionInfo metaHRI = RegionInfoBuilder.FIRST_META_REGIONINFO;
     TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
@@ -1518,7 +1522,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
         // add the row directly to meta.
         HbckInfo hi = his.iterator().next();
-        HRegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
+        RegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
         Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
         puts.add(p);
       }
@@ -1974,7 +1978,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
     List<CheckRegionConsistencyWorkItem> workItems = new ArrayList<>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
-      if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (e.getValue().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
         workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
@@ -1986,7 +1990,7 @@ public class HBaseFsck extends Configured implements Closeable {
     // deployed/undeployed replicas.
     List<CheckRegionConsistencyWorkItem> replicaWorkItems = new ArrayList<>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
-      if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (e.getValue().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
         replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
@@ -2169,9 +2173,10 @@ public class HBaseFsck extends Configured implements Closeable {
     d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
     mutations.add(d);
 
-    HRegionInfo hri = new HRegionInfo(hi.metaEntry);
-    hri.setOffline(false);
-    hri.setSplit(false);
+    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
+        .setOffline(false)
+        .setSplit(false)
+        .build();
     Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
     mutations.add(p);
 
@@ -2219,13 +2224,13 @@ public class HBaseFsck extends Configured implements Closeable {
   private void undeployRegions(HbckInfo hi) throws IOException, InterruptedException {
     undeployRegionsForHbi(hi);
     // undeploy replicas of the region (but only if the method is invoked for the primary)
-    if (hi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+    if (hi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
       return;
     }
     int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication();
     for (int i = 1; i < numReplicas; i++) {
       if (hi.getPrimaryHRIForDeployedReplica() == null) continue;
-      HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(
+      RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(
           hi.getPrimaryHRIForDeployedReplica(), i);
       HbckInfo h = regionInfoMap.get(hri.getEncodedName());
       if (h != null) {
@@ -2274,7 +2279,7 @@ public class HBaseFsck extends Configured implements Closeable {
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
     // also get the locations of the replicas to close if the primary region is being closed
-    if (hi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+    if (hi.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
       int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication();
       for (int i = 0; i < numReplicas; i++) {
         get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
@@ -2296,7 +2301,7 @@ public class HBaseFsck extends Configured implements Closeable {
             + "have handle to reach it.");
         continue;
       }
-      HRegionInfo hri = h.getRegionInfo();
+      RegionInfo hri = h.getRegionInfo();
       if (hri == null) {
         LOG.warn("Unable to close region " + hi.getRegionNameAsString()
             + " because hbase:meta had invalid or missing "
@@ -2317,7 +2322,7 @@ public class HBaseFsck extends Configured implements Closeable {
       errors.print(msg);
       undeployRegions(hbi);
       setShouldRerun();
-      HRegionInfo hri = hbi.getHdfsHRI();
+      RegionInfo hri = hbi.getHdfsHRI();
       if (hri == null) {
         hri = hbi.metaEntry;
       }
@@ -2325,7 +2330,7 @@ public class HBaseFsck extends Configured implements Closeable {
       HBaseFsckRepair.waitUntilAssigned(admin, hri);
 
       // also assign replicas if needed (do it only when this call operates on a primary replica)
-      if (hbi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) return;
+      if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) return;
       int replicationCount = admin.getTableDescriptor(hri.getTable()).getRegionReplication();
       for (int i = 1; i < replicationCount; i++) {
         hri = RegionReplicaUtil.getRegionInfoForReplica(hri, i);
@@ -2413,10 +2418,10 @@ public class HBaseFsck extends Configured implements Closeable {
           return;
         }
 
-        HRegionInfo hri = hbi.getHdfsHRI();
+        RegionInfo hri = hbi.getHdfsHRI();
         TableInfo tableInfo = tablesInfo.get(hri.getTable());
 
-        for (HRegionInfo region : tableInfo.getRegionsFromMeta()) {
+        for (RegionInfo region : tableInfo.getRegionsFromMeta()) {
           if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0
               && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(),
                 hri.getEndKey()) >= 0)
@@ -2454,7 +2459,7 @@ public class HBaseFsck extends Configured implements Closeable {
       errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName
           + " not in META, but deployed on " + Joiner.on(", ").join(hbi.deployedOn));
       debugLsr(hbi.getHdfsRegionDir());
-      if (hbi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
         // for replicas, this means that we should undeploy the region (we would have
         // gone over the primaries and fixed meta holes in first phase under
         // checkAndFixConsistency; we shouldn't get the condition !inMeta at
@@ -2463,7 +2468,7 @@ public class HBaseFsck extends Configured implements Closeable {
           undeployRegionsForHbi(hbi);
         }
       }
-      if (shouldFixMeta() && hbi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (shouldFixMeta() && hbi.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
         if (!hbi.isHdfsRegioninfoPresent()) {
           LOG.error("This should have been repaired in table integrity repair phase");
           return;
@@ -2493,7 +2498,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
       // For Replica region, we need to do a similar check. If replica is not split successfully,
       // error is going to be reported against primary daughter region.
-      if (hbi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+      if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
         LOG.info("Region " + descriptiveName + " is a split parent in META, in HDFS, "
             + "and not deployed on any region server. This may be transient.");
         hbi.setSkipChecks(true);
@@ -2784,7 +2789,7 @@ public class HBaseFsck extends Configured implements Closeable {
       TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp);
 
     // list of regions derived from meta entries.
-    private ImmutableList<HRegionInfo> regionsFromMeta = null;
+    private ImmutableList<RegionInfo> regionsFromMeta = null;
 
     TableInfo(TableName name) {
       this.tableName = name;
@@ -2808,7 +2813,7 @@ public class HBaseFsck extends Configured implements Closeable {
       if (Bytes.equals(hir.getEndKey(), HConstants.EMPTY_END_ROW)) {
         // end key is absolute end key, just add it.
         // ignore replicas other than primary for these checks
-        if (hir.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
+        if (hir.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
         return;
       }
 
@@ -2826,7 +2831,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
       // main case, add to split calculator
       // ignore replicas other than primary for these checks
-      if (hir.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
+      if (hir.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
     }
 
     public void addServer(ServerName server) {
@@ -2841,18 +2846,18 @@ public class HBaseFsck extends Configured implements Closeable {
       return sc.getStarts().size() + backwards.size();
     }
 
-    public synchronized ImmutableList<HRegionInfo> getRegionsFromMeta() {
+    public synchronized ImmutableList<RegionInfo> getRegionsFromMeta() {
       // lazy loaded, synchronized to ensure a single load
       if (regionsFromMeta == null) {
-        List<HRegionInfo> regions = new ArrayList<>();
+        List<RegionInfo> regions = new ArrayList<>();
         for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) {
           if (tableName.equals(h.getTableName())) {
             if (h.metaEntry != null) {
-              regions.add((HRegionInfo) h.metaEntry);
+              regions.add(h.metaEntry);
             }
           }
         }
-        regionsFromMeta = Ordering.natural().immutableSortedCopy(regions);
+        regionsFromMeta = Ordering.from(RegionInfo.COMPARATOR).immutableSortedCopy(regions);
       }
 
       return regionsFromMeta;
@@ -2968,8 +2973,10 @@ public class HBaseFsck extends Configured implements Closeable {
             getTableInfo(), next);
         TableDescriptor htd = getTableInfo().getHTD();
         // from special EMPTY_START_ROW to next region's startKey
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
-            HConstants.EMPTY_START_ROW, next.getStartKey());
+        RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
+            .setStartKey(HConstants.EMPTY_START_ROW)
+            .setEndKey(next.getStartKey())
+            .build();
 
         // TODO test
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
@@ -2985,8 +2992,10 @@ public class HBaseFsck extends Configured implements Closeable {
                 + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
         TableDescriptor htd = getTableInfo().getHTD();
         // from curEndKey to EMPTY_START_ROW
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
-            HConstants.EMPTY_START_ROW);
+        RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
+            .setStartKey(curEndKey)
+            .setEndKey(HConstants.EMPTY_START_ROW)
+            .build();
 
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Table region end key was not empty.  Created new empty region: " + newRegion
@@ -3008,7 +3017,10 @@ public class HBaseFsck extends Configured implements Closeable {
                 + ".  Creating a new regioninfo and region "
                 + "dir in hdfs to plug the hole.");
         TableDescriptor htd = getTableInfo().getHTD();
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
+        RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
+            .setStartKey(holeStartKey)
+            .setEndKey(holeStopKey)
+            .build();
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
         fixes++;
@@ -3210,8 +3222,10 @@ public class HBaseFsck extends Configured implements Closeable {
         // create new empty container region.
         TableDescriptor htd = getTableInfo().getHTD();
         // from start key to end Key
-        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
-            range.getSecond());
+        RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
+            .setStartKey(range.getFirst())
+            .setEndKey(range.getSecond())
+            .build();
         HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
         LOG.info("[" + thread + "] Created new empty container region: " +
             newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
@@ -3344,10 +3358,10 @@ public class HBaseFsck extends Configured implements Closeable {
           ArrayList<HbckInfo> subRange = new ArrayList<>(ranges);
           //  this dumb and n^2 but this shouldn't happen often
           for (HbckInfo r1 : ranges) {
-            if (r1.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue;
+            if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) continue;
             subRange.remove(r1);
             for (HbckInfo r2 : subRange) {
-              if (r2.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue;
+              if (r2.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) continue;
               // general case of same start key
               if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) {
                 handler.handleDuplicateStartKeys(r1,r2);
@@ -3642,8 +3656,8 @@ public class HBaseFsck extends Configured implements Closeable {
       errors.print("Trying to fix a problem with hbase:meta..");
       setShouldRerun();
       // try to fix it (treat it as unassigned region)
-      HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
-          HRegionInfo.FIRST_META_REGIONINFO, replicaId);
+      RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
+          RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId);
       HBaseFsckRepair.fixUnassigned(admin, h);
       HBaseFsckRepair.waitUntilAssigned(admin, h);
     }
@@ -3679,19 +3693,19 @@ public class HBaseFsck extends Configured implements Closeable {
             return true;
           }
           ServerName sn = null;
-          if (rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID) == null ||
-              rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null) {
+          if (rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null ||
+              rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null) {
             emptyRegionInfoQualifiers.add(result);
             errors.reportError(ERROR_CODE.EMPTY_META_CELL,
               "Empty REGIONINFO_QUALIFIER found in hbase:meta");
             return true;
           }
-          HRegionInfo hri = rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo();
+          RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegionInfo();
           if (!(isTableIncluded(hri.getTable())
               || hri.isMetaRegion())) {
             return true;
           }
-          PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(result);
+          PairOfSameType<RegionInfo> daughters = MetaTableAccessor.getDaughterRegions(result);
           for (HRegionLocation h : rl.getRegionLocations()) {
             if (h == null || h.getRegionInfo() == null) {
               continue;
@@ -3700,7 +3714,7 @@ public class HBaseFsck extends Configured implements Closeable {
             hri = h.getRegionInfo();
 
             MetaEntry m = null;
-            if (hri.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+            if (hri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
               m = new MetaEntry(hri, sn, ts, daughters.getFirst(), daughters.getSecond());
             } else {
               m = new MetaEntry(hri, sn, ts, null, null);
@@ -3714,8 +3728,8 @@ public class HBaseFsck extends Configured implements Closeable {
               throw new IOException("Two entries in hbase:meta are same " + previous);
             }
           }
-          PairOfSameType<HRegionInfo> mergeRegions = MetaTableAccessor.getMergeRegions(result);
-          for (HRegionInfo mergeRegion : new HRegionInfo[] {
+          PairOfSameType<RegionInfo> mergeRegions = MetaTableAccessor.getMergeRegions(result);
+          for (RegionInfo mergeRegion : new RegionInfo[] {
               mergeRegions.getFirst(), mergeRegions.getSecond() }) {
             if (mergeRegion != null) {
               // This region is already been merged
@@ -3751,14 +3765,14 @@ public class HBaseFsck extends Configured implements Closeable {
   static class MetaEntry extends HRegionInfo {
     ServerName regionServer;   // server hosting this region
     long modTime;          // timestamp of most recent modification metadata
-    HRegionInfo splitA, splitB; //split daughters
+    RegionInfo splitA, splitB; //split daughters
 
-    public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime) {
+    public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime) {
       this(rinfo, regionServer, modTime, null, null);
     }
 
-    public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime,
-        HRegionInfo splitA, HRegionInfo splitB) {
+    public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime,
+        RegionInfo splitA, RegionInfo splitB) {
       super(rinfo);
       this.regionServer = regionServer;
       this.modTime = modTime;
@@ -3800,7 +3814,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Stores the regioninfo entries from HDFS
    */
   static class HdfsEntry {
-    HRegionInfo hri;
+    RegionInfo hri;
     Path hdfsRegionDir = null;
     long hdfsRegionDirModTime  = 0;
     boolean hdfsRegioninfoFilePresent = false;
@@ -3811,7 +3825,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Stores the regioninfo retrieved from Online region servers.
    */
   static class OnlineEntry {
-    HRegionInfo hri;
+    RegionInfo hri;
     ServerName hsa;
 
     @Override
@@ -3831,8 +3845,8 @@ public class HBaseFsck extends Configured implements Closeable {
     private List<ServerName> deployedOn = Lists.newArrayList(); // info on RS's
     private boolean skipChecks = false; // whether to skip further checks to this region info.
     private boolean isMerged = false;// whether this region has already been merged into another one
-    private int deployedReplicaId = HRegionInfo.DEFAULT_REPLICA_ID;
-    private HRegionInfo primaryHRIForDeployedReplica = null;
+    private int deployedReplicaId = RegionInfo.DEFAULT_REPLICA_ID;
+    private RegionInfo primaryHRIForDeployedReplica = null;
 
     HbckInfo(MetaEntry metaEntry) {
       this.metaEntry = metaEntry;
@@ -3842,7 +3856,7 @@ public class HBaseFsck extends Configured implements Closeable {
       return metaEntry != null? metaEntry.getReplicaId(): deployedReplicaId;
     }
 
-    public synchronized void addServer(HRegionInfo hri, ServerName server) {
+    public synchronized void addServer(RegionInfo hri, ServerName server) {
       OnlineEntry rse = new OnlineEntry() ;
       rse.hri = hri;
       rse.hsa = server;
@@ -3937,7 +3951,7 @@ public class HBaseFsck extends Configured implements Closeable {
       }
     }
 
-    public HRegionInfo getPrimaryHRIForDeployedReplica() {
+    public RegionInfo getPrimaryHRIForDeployedReplica() {
       return primaryHRIForDeployedReplica;
     }
 
@@ -3969,7 +3983,7 @@ public class HBaseFsck extends Configured implements Closeable {
       return hdfsEntry.hdfsRegionDirModTime;
     }
 
-    HRegionInfo getHdfsHRI() {
+    RegionInfo getHdfsHRI() {
       if (hdfsEntry == null) {
         return null;
       }
@@ -4271,13 +4285,13 @@ public class HBaseFsck extends Configured implements Closeable {
         BlockingInterface server = connection.getAdmin(rsinfo);
 
         // list all online regions from this region server
-        List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+        List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
         regions = filterRegions(regions);
 
         if (details) {
           errors.detail("RegionServer: " + rsinfo.getServerName() +
                            " number of regions: " + regions.size());
-          for (HRegionInfo rinfo: regions) {
+          for (RegionInfo rinfo: regions) {
             errors.detail("  " + rinfo.getRegionNameAsString() +
                              " id: " + rinfo.getRegionId() +
                              " encoded_name: " + rinfo.getEncodedName() +
@@ -4287,7 +4301,7 @@ public class HBaseFsck extends Configured implements Closeable {
         }
 
         // check to see if the existence of this region matches the region in META
-        for (HRegionInfo r:regions) {
+        for (RegionInfo r:regions) {
           HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName());
           hbi.addServer(r, rsinfo);
         }
@@ -4299,9 +4313,9 @@ public class HBaseFsck extends Configured implements Closeable {
       return null;
     }
 
-    private List<HRegionInfo> filterRegions(List<HRegionInfo> regions) {
-      List<HRegionInfo> ret = Lists.newArrayList();
-      for (HRegionInfo hri : regions) {
+    private List<RegionInfo> filterRegions(List<RegionInfo> regions) {
+      List<RegionInfo> ret = Lists.newArrayList();
+      for (RegionInfo hri : regions) {
         if (hri.isMetaTable() || (!hbck.checkMetaOnly
             && hbck.isTableIncluded(hri.getTable()))) {
           ret.add(hri);

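One detail in the HBaseFsck hunks is easy to miss: getRegionsFromMeta() now sorts with Ordering.from(RegionInfo.COMPARATOR) instead of Ordering.natural(), because the RegionInfo interface, unlike HRegionInfo, carries no natural ordering of its own. A small self-contained sketch of sorting through the explicit comparator (the keys are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionOrderingSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("example_table");
    List<RegionInfo> regions = new ArrayList<>();
    // Two adjacent regions, added out of key order on purpose.
    regions.add(RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("m"))
        .setEndKey(HConstants.EMPTY_END_ROW)
        .build());
    regions.add(RegionInfoBuilder.newBuilder(table)
        .setStartKey(HConstants.EMPTY_START_ROW)
        .setEndKey(Bytes.toBytes("m"))
        .build());
    // Sort through the interface's explicit comparator, as the patch does.
    regions.sort(RegionInfo.COMPARATOR);
    regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
  }
}
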
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 651d6e5..afb6c5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -28,23 +28,23 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -64,18 +64,16 @@ public class HBaseFsckRepair {
    * @param region Region to undeploy
    * @param servers list of Servers to undeploy from
    */
-  public static void fixMultiAssignment(Connection connection, HRegionInfo region,
+  public static void fixMultiAssignment(Connection connection, RegionInfo region,
       List<ServerName> servers)
   throws IOException, KeeperException, InterruptedException {
-    HRegionInfo actualRegion = new HRegionInfo(region);
-
     // Close region on the servers silently
     for(ServerName server : servers) {
-      closeRegionSilentlyAndWait(connection, server, actualRegion);
+      closeRegionSilentlyAndWait(connection, server, region);
     }
 
     // Force ZK node to OFFLINE so master assigns
-    forceOfflineInZK(connection.getAdmin(), actualRegion);
+    forceOfflineInZK(connection.getAdmin(), region);
   }
 
   /**
@@ -90,12 +88,10 @@ public class HBaseFsckRepair {
    * @throws IOException
    * @throws KeeperException
    */
-  public static void fixUnassigned(Admin admin, HRegionInfo region)
+  public static void fixUnassigned(Admin admin, RegionInfo region)
       throws IOException, KeeperException, InterruptedException {
-    HRegionInfo actualRegion = new HRegionInfo(region);
-
     // Force ZK node to OFFLINE so master assigns
-    forceOfflineInZK(admin, actualRegion);
+    forceOfflineInZK(admin, region);
   }
 
   /**
@@ -103,14 +99,12 @@ public class HBaseFsckRepair {
    * in ZK to have HBCK_CODE_NAME as the server.  This is a special case in
    * the AssignmentManager that attempts an assign call by the master.
    *
-   * @see org.apache.hadoop.hbase.master.AssignementManager#handleHBCK
-   *
    * This doesn't seem to work properly in the updated version of 0.92+'s hbck
    * so we use assign to force the region into transition.  This has the
-   * side-effect of requiring a HRegionInfo that considers regionId (timestamp)
+   * side-effect of requiring a RegionInfo that considers regionId (timestamp)
    * in comparators that is addressed by HBASE-5563.
    */
-  private static void forceOfflineInZK(Admin admin, final HRegionInfo region)
+  private static void forceOfflineInZK(Admin admin, final RegionInfo region)
   throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException {
     admin.assign(region.getRegionName());
   }
@@ -119,7 +113,7 @@ public class HBaseFsckRepair {
    * Should we check all assignments or just not in RIT?
    */
   public static void waitUntilAssigned(Admin admin,
-      HRegionInfo region) throws IOException, InterruptedException {
+      RegionInfo region) throws IOException, InterruptedException {
     long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
     long expiration = timeout + EnvironmentEdgeManager.currentTime();
     while (EnvironmentEdgeManager.currentTime() < expiration) {
@@ -127,7 +121,7 @@ public class HBaseFsckRepair {
         boolean inTransition = false;
         for (RegionState rs : admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
                                    .getRegionsInTransition()) {
-          if (rs.getRegion().equals(region)) {
+          if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) {
             inTransition = true;
             break;
           }
@@ -155,7 +149,7 @@ public class HBaseFsckRepair {
    */
   @SuppressWarnings("deprecation")
   public static void closeRegionSilentlyAndWait(Connection connection,
-      ServerName server, HRegionInfo region) throws IOException, InterruptedException {
+      ServerName server, RegionInfo region) throws IOException, InterruptedException {
     long timeout = connection.getConfiguration()
       .getLong("hbase.hbck.close.timeout", 120000);
     ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
@@ -163,10 +157,10 @@ public class HBaseFsckRepair {
   }
 
   /**
-   * Puts the specified HRegionInfo into META with replica related columns
+   * Puts the specified RegionInfo into META with replica related columns
    */
   public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
-      HRegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
+      RegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
     Connection conn = ConnectionFactory.createConnection(conf);
     Table meta = conn.getTable(TableName.META_TABLE_NAME);
     Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
@@ -191,7 +185,7 @@ public class HBaseFsckRepair {
    * Creates, flushes, and closes a new region.
    */
   public static HRegion createHDFSRegionDir(Configuration conf,
-      HRegionInfo hri, TableDescriptor htd) throws IOException {
+      RegionInfo hri, TableDescriptor htd) throws IOException {
     // Create HRegion
     Path root = FSUtils.getRootDir(conf);
     HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);
@@ -204,7 +198,7 @@ public class HBaseFsckRepair {
   /*
    * Remove parent
    */
-  public static void removeParentInMeta(Configuration conf, HRegionInfo hri) throws IOException {
+  public static void removeParentInMeta(Configuration conf, RegionInfo hri) throws IOException {
     Connection conn = ConnectionFactory.createConnection(conf);
     MetaTableAccessor.deleteRegion(conn, hri);
   }

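The waitUntilAssigned change above is also worth a pause: rs.getRegion().equals(region) becomes RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0, since two RegionInfo implementations describing the same region are not guaranteed to be equals()-compatible. A hedged sketch of comparator-based identity (pinning the regionId is an assumption to keep the two builds deterministic):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class RegionIdentitySketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("example_table");
    // Pin regionId explicitly so both instances match deterministically.
    RegionInfo a = RegionInfoBuilder.newBuilder(table).setRegionId(1L).build();
    RegionInfo b = RegionInfoBuilder.newBuilder(table).setRegionId(1L).build();
    // Compare through the comparator instead of equals().
    System.out.println(RegionInfo.COMPARATOR.compare(a, b) == 0); // true
  }
}
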
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 8e3e105..fb99cba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -21,9 +21,9 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 
@@ -61,7 +61,7 @@ public class HFileArchiveUtil {
    *         not be archived
    */
   public static Path getStoreArchivePath(Configuration conf,
-                                         HRegionInfo region,
+                                         RegionInfo region,
                                          Path tabledir,
       byte[] family) throws IOException {
     return getStoreArchivePath(conf, region, family);
@@ -76,7 +76,7 @@ public class HFileArchiveUtil {
    *         not be archived
    */
   public static Path getStoreArchivePath(Configuration conf,
-                                         HRegionInfo region,
+                                         RegionInfo region,
       byte[] family) throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
     Path tableArchiveDir = getTableArchivePath(rootDir, region.getTable());
@@ -146,7 +146,7 @@ public class HFileArchiveUtil {
   }
 
   /**
-   * Get the full path to the archive directory on the configured 
+   * Get the full path to the archive directory on the configured
    * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
    * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
    *          testing: requires a FileSystem root directory to be specified.
@@ -158,7 +158,7 @@ public class HFileArchiveUtil {
   }
 
   /**
-   * Get the full path to the archive directory on the configured 
+   * Get the full path to the archive directory on the configured
    * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
    * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
    *          the archive path)
@@ -167,7 +167,7 @@ public class HFileArchiveUtil {
   private static Path getArchivePath(final Path rootdir) {
     return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
   }
-  
+
   /*
    * @return table name given archive file path
    */

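The last file, ModifyRegionUtils, renames createHRegionInfos to createRegionInfos and rebuilds the split-key loop on the builder. A standalone sketch of that loop under illustrative split keys (null start/end keys stand in for the empty row boundaries, as in the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeysSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("example_table");
    byte[][] splitKeys = { Bytes.toBytes("g"), Bytes.toBytes("p") }; // illustrative
    long regionId = System.currentTimeMillis();
    // N split keys yield N + 1 contiguous regions.
    RegionInfo[] regions = new RegionInfo[splitKeys.length + 1];
    byte[] startKey = null;
    for (int i = 0; i < regions.length; i++) {
      byte[] endKey = (i == splitKeys.length) ? null : splitKeys[i];
      regions[i] = RegionInfoBuilder.newBuilder(table)
          .setStartKey(startKey)  // null means the empty start row
          .setEndKey(endKey)      // null means the empty end row
          .setSplit(false)
          .setRegionId(regionId)
          .build();
      startKey = endKey;          // next region begins where this one ends
    }
    for (RegionInfo r : regions) {
      System.out.println(r.getRegionNameAsString());
    }
  }
}
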
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 53c6d7a..fe33c24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -34,13 +34,14 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Utility methods for interacting with the regions.
@@ -57,27 +58,36 @@ public abstract class ModifyRegionUtils {
   }
 
   public interface RegionEditTask {
-    void editRegion(final HRegionInfo region) throws IOException;
+    void editRegion(final RegionInfo region) throws IOException;
   }
 
-  public static HRegionInfo[] createHRegionInfos(TableDescriptor tableDescriptor,
+  public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor,
       byte[][] splitKeys) {
     long regionId = System.currentTimeMillis();
-    HRegionInfo[] hRegionInfos = null;
+    RegionInfo[] hRegionInfos = null;
     if (splitKeys == null || splitKeys.length == 0) {
-      hRegionInfos = new HRegionInfo[]{
-        new HRegionInfo(tableDescriptor.getTableName(), null, null, false, regionId)
+      hRegionInfos = new RegionInfo[]{
+          RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
+           .setStartKey(null)
+           .setEndKey(null)
+           .setSplit(false)
+           .setRegionId(regionId)
+           .build()
       };
     } else {
       int numRegions = splitKeys.length + 1;
-      hRegionInfos = new HRegionInfo[numRegions];
+      hRegionInfos = new RegionInfo[numRegions];
       byte[] startKey = null;
       byte[] endKey = null;
       for (int i = 0; i < numRegions; i++) {
         endKey = (i == splitKeys.length) ? null : splitKeys[i];
         hRegionInfos[i] =
-             new HRegionInfo(tableDescriptor.getTableName(), startKey, endKey,
-                 false, regionId);
+            RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
+                .setStartKey(startKey)
+                .setEndKey(endKey)
+                .setSplit(false)
+                .setRegionId(regionId)
+                .build();
         startKey = endKey;
       }
     }
@@ -91,12 +101,12 @@ public abstract class ModifyRegionUtils {
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
    * @param tableDescriptor description of the table
-   * @param newRegions {@link HRegionInfo} that describes the regions to create
+   * @param newRegions {@link RegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
-  public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
-      final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+  public static List<RegionInfo> createRegions(final Configuration conf, final Path rootDir,
+      final TableDescriptor tableDescriptor, final RegionInfo[] newRegions,
       final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
@@ -117,22 +127,22 @@ public abstract class ModifyRegionUtils {
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
    * @param tableDescriptor description of the table
-   * @param newRegions {@link HRegionInfo} that describes the regions to create
+   * @param newRegions {@link RegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
-  public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
+  public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
                                                 final Configuration conf, final Path rootDir,
-                                                final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
+                                                final TableDescriptor tableDescriptor, final RegionInfo[] newRegions,
                                                 final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
-    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
-    List<HRegionInfo> regionInfos = new ArrayList<>();
-    for (final HRegionInfo newRegion : newRegions) {
-      completionService.submit(new Callable<HRegionInfo>() {
+    CompletionService<RegionInfo> completionService = new ExecutorCompletionService<>(exec);
+    List<RegionInfo> regionInfos = new ArrayList<>();
+    for (final RegionInfo newRegion : newRegions) {
+      completionService.submit(new Callable<RegionInfo>() {
         @Override
-        public HRegionInfo call() throws IOException {
+        public RegionInfo call() throws IOException {
           return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
         }
       });
@@ -156,12 +166,12 @@ public abstract class ModifyRegionUtils {
    * @param conf {@link Configuration}
    * @param rootDir Root directory for HBase instance
    * @param tableDescriptor description of the table
-   * @param newRegion {@link HRegionInfo} that describes the region to create
+   * @param newRegion {@link RegionInfo} that describes the region to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
-  public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
-      final TableDescriptor tableDescriptor, final HRegionInfo newRegion,
+  public static RegionInfo createRegion(final Configuration conf, final Path rootDir,
+      final TableDescriptor tableDescriptor, final RegionInfo newRegion,
       final RegionFillTask task) throws IOException {
     // 1. Create HRegion
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
@@ -185,14 +195,14 @@ public abstract class ModifyRegionUtils {
    * Execute the task on the specified set of regions.
    *
    * @param exec Thread Pool Executor
-   * @param regions {@link HRegionInfo} that describes the regions to edit
+   * @param regions {@link RegionInfo} that describes the regions to edit
   * @param task {@link RegionEditTask} custom code to edit the region
    * @throws IOException
    */
   public static void editRegions(final ThreadPoolExecutor exec,
-      final Collection<HRegionInfo> regions, final RegionEditTask task) throws IOException {
+      final Collection<RegionInfo> regions, final RegionEditTask task) throws IOException {
     final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(exec);
-    for (final HRegionInfo hri: regions) {
+    for (final RegionInfo hri: regions) {
       completionService.submit(new Callable<Void>() {
         @Override
         public Void call() throws IOException {
@@ -203,7 +213,7 @@ public abstract class ModifyRegionUtils {
     }
 
     try {
-      for (HRegionInfo hri: regions) {
+      for (RegionInfo hri: regions) {
         completionService.take().get();
       }
     } catch (InterruptedException e) {
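
The hunk above is the heart of this file's change: the deprecated HRegionInfo constructor is replaced by the RegionInfoBuilder fluent API, and the public helpers now traffic in the client-facing RegionInfo interface. A minimal, self-contained sketch of the new construction pattern (the table name "demo" is a placeholder, not from the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class RegionInfoBuilderExample {
  public static void main(String[] args) {
    // Old style: new HRegionInfo(tableName, startKey, endKey, false, regionId).
    // New style: the builder below, as used in createRegionInfos above.
    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo"))
        .setStartKey(null)   // null start key: region begins at the table's first row
        .setEndKey(null)     // null end key: region extends to the table's last row
        .setSplit(false)
        .setRegionId(System.currentTimeMillis())
        .build();
    System.out.println(ri.getRegionNameAsString());
  }
}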

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 4d9e24c..0f36a7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -43,20 +43,19 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -64,6 +63,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
 * Tool for loading/unloading regions to/from a given regionserver. This tool can be run from Command
@@ -253,7 +253,7 @@ public class RegionMover extends AbstractHBaseTool {
     public Boolean call() throws IOException {
       Connection conn = ConnectionFactory.createConnection(rm.conf);
       try {
-        List<HRegionInfo> regionsToMove = readRegionsFromFile(rm.filename);
+        List<RegionInfo> regionsToMove = readRegionsFromFile(rm.filename);
         if (regionsToMove.isEmpty()) {
           LOG.info("No regions to load.Exiting");
           return true;
@@ -313,7 +313,7 @@ public class RegionMover extends AbstractHBaseTool {
 
   private class Unload implements Callable<Boolean> {
 
-    List<HRegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<HRegionInfo>());
+    List<RegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<RegionInfo>());
     private RegionMover rm;
 
     public Unload(RegionMover rm) {
@@ -366,9 +366,9 @@ public class RegionMover extends AbstractHBaseTool {
   }
 
   private void loadRegions(Admin admin, String hostname, int port,
-      List<HRegionInfo> regionsToMove, boolean ack) throws Exception {
+      List<RegionInfo> regionsToMove, boolean ack) throws Exception {
     String server = null;
-    List<HRegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<HRegionInfo>());
+    List<RegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<RegionInfo>());
     int maxWaitInSeconds =
         admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, DEFAULT_SERVERSTART_WAIT_MAX);
     long maxWait = EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000;
@@ -402,7 +402,7 @@ public class RegionMover extends AbstractHBaseTool {
     List<Future<Boolean>> taskList = new ArrayList<>();
     int counter = 0;
     while (counter < regionsToMove.size()) {
-      HRegionInfo region = regionsToMove.get(counter);
+      RegionInfo region = regionsToMove.get(counter);
       String currentServer = getServerNameForRegion(admin, region);
       if (currentServer == null) {
         LOG.warn("Could not get server for Region:" + region.getEncodedName() + " moving on");
@@ -462,8 +462,8 @@ public class RegionMover extends AbstractHBaseTool {
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DLS_DEAD_LOCAL_STORE",
       justification="FB is wrong; its size is read")
   private void unloadRegions(Admin admin, String server, ArrayList<String> regionServers,
-      boolean ack, List<HRegionInfo> movedRegions) throws Exception {
-    List<HRegionInfo> regionsToMove = new ArrayList<>();// FindBugs: DLS_DEAD_LOCAL_STORE
+      boolean ack, List<RegionInfo> movedRegions) throws Exception {
+    List<RegionInfo> regionsToMove = new ArrayList<>();// FindBugs: DLS_DEAD_LOCAL_STORE
     regionsToMove = getRegions(this.conf, server);
     if (regionsToMove.isEmpty()) {
       LOG.info("No Regions to move....Quitting now");
@@ -540,13 +540,13 @@ public class RegionMover extends AbstractHBaseTool {
    */
   private class MoveWithAck implements Callable<Boolean> {
     private Admin admin;
-    private HRegionInfo region;
+    private RegionInfo region;
     private String targetServer;
-    private List<HRegionInfo> movedRegions;
+    private List<RegionInfo> movedRegions;
     private String sourceServer;
 
-    public MoveWithAck(Admin admin, HRegionInfo regionInfo, String sourceServer,
-        String targetServer, List<HRegionInfo> movedRegions) {
+    public MoveWithAck(Admin admin, RegionInfo regionInfo, String sourceServer,
+        String targetServer, List<RegionInfo> movedRegions) {
       this.admin = admin;
       this.region = regionInfo;
       this.targetServer = targetServer;
@@ -605,13 +605,13 @@ public class RegionMover extends AbstractHBaseTool {
    */
   private static class MoveWithoutAck implements Callable<Boolean> {
     private Admin admin;
-    private HRegionInfo region;
+    private RegionInfo region;
     private String targetServer;
-    private List<HRegionInfo> movedRegions;
+    private List<RegionInfo> movedRegions;
     private String sourceServer;
 
-    public MoveWithoutAck(Admin admin, HRegionInfo regionInfo, String sourceServer,
-        String targetServer, List<HRegionInfo> movedRegions) {
+    public MoveWithoutAck(Admin admin, RegionInfo regionInfo, String sourceServer,
+        String targetServer, List<RegionInfo> movedRegions) {
       this.admin = admin;
       this.region = regionInfo;
       this.targetServer = targetServer;
@@ -637,8 +637,8 @@ public class RegionMover extends AbstractHBaseTool {
     }
   }
 
-  private List<HRegionInfo> readRegionsFromFile(String filename) throws IOException {
-    List<HRegionInfo> regions = new ArrayList<>();
+  private List<RegionInfo> readRegionsFromFile(String filename) throws IOException {
+    List<RegionInfo> regions = new ArrayList<>();
     File f = new File(filename);
     if (!f.exists()) {
       return regions;
@@ -651,7 +651,7 @@ public class RegionMover extends AbstractHBaseTool {
       int numRegions = dis.readInt();
       int index = 0;
       while (index < numRegions) {
-        regions.add(HRegionInfo.parseFromOrNull(Bytes.readByteArray(dis)));
+        regions.add(RegionInfo.parseFromOrNull(Bytes.readByteArray(dis)));
         index++;
       }
     } catch (IOException e) {
@@ -675,10 +675,10 @@ public class RegionMover extends AbstractHBaseTool {
    * @return List of Regions online on the server
    * @throws IOException
    */
-  private List<HRegionInfo> getRegions(Configuration conf, String server) throws IOException {
+  private List<RegionInfo> getRegions(Configuration conf, String server) throws IOException {
     Connection conn = ConnectionFactory.createConnection(conf);
     try {
-      return conn.getAdmin().getOnlineRegions(ServerName.valueOf(server));
+      return conn.getAdmin().getRegions(ServerName.valueOf(server));
     } finally {
       conn.close();
     }
@@ -691,15 +691,15 @@ public class RegionMover extends AbstractHBaseTool {
    * @param movedRegions
    * @throws IOException
    */
-  private void writeFile(String filename, List<HRegionInfo> movedRegions) throws IOException {
+  private void writeFile(String filename, List<RegionInfo> movedRegions) throws IOException {
     FileOutputStream fos = null;
     DataOutputStream dos = null;
     try {
       fos = new FileOutputStream(filename);
       dos = new DataOutputStream(fos);
       dos.writeInt(movedRegions.size());
-      for (HRegionInfo region : movedRegions) {
-        Bytes.writeByteArray(dos, region.toByteArray());
+      for (RegionInfo region : movedRegions) {
+        Bytes.writeByteArray(dos, RegionInfo.toByteArray(region));
       }
     } catch (IOException e) {
       LOG.error("ERROR: Was Not able to write regions moved to output file but moved "
@@ -846,7 +846,7 @@ public class RegionMover extends AbstractHBaseTool {
    * @param region
    * @throws IOException
    */
-  private void isSuccessfulScan(Admin admin, HRegionInfo region) throws IOException {
+  private void isSuccessfulScan(Admin admin, RegionInfo region) throws IOException {
     Scan scan = new Scan(region.getStartKey());
     scan.setBatch(1);
     scan.setCaching(1);
@@ -877,7 +877,7 @@ public class RegionMover extends AbstractHBaseTool {
    * @return true if region is hosted on serverName otherwise false
    * @throws IOException
    */
-  private boolean isSameServer(Admin admin, HRegionInfo region, String serverName)
+  private boolean isSameServer(Admin admin, RegionInfo region, String serverName)
       throws IOException {
     String serverForRegion = getServerNameForRegion(admin, region);
     if (serverForRegion != null && serverForRegion.equals(serverName)) {
@@ -894,7 +894,7 @@ public class RegionMover extends AbstractHBaseTool {
    * @return regionServer hosting the given region
    * @throws IOException
    */
-  private String getServerNameForRegion(Admin admin, HRegionInfo region) throws IOException {
+  private String getServerNameForRegion(Admin admin, RegionInfo region) throws IOException {
     String server = null;
     if (!admin.isTableEnabled(region.getTable())) {
       return null;
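
The RegionMover changes swap HRegionInfo for RegionInfo throughout and touch two APIs: Admin.getOnlineRegions(ServerName) becomes Admin.getRegions(ServerName), and serialization moves to static helpers on the interface, RegionInfo.toByteArray and RegionInfo.parseFromOrNull. A sketch of the new round-trip, mirroring writeFile/readRegionsFromFile above but using an in-memory stream for brevity:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoRoundTrip {
  // Serializes and re-parses a RegionInfo the way RegionMover now does.
  static RegionInfo roundTrip(RegionInfo region) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    try (DataOutputStream dos = new DataOutputStream(buf)) {
      // toByteArray is a static helper on the RegionInfo interface,
      // not an instance method as it was on HRegionInfo.
      Bytes.writeByteArray(dos, RegionInfo.toByteArray(region));
    }
    try (DataInputStream dis =
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
      // parseFromOrNull returns null on unparseable input instead of throwing.
      return RegionInfo.parseFromOrNull(Bytes.readByteArray(dis));
    }
  }
}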

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 34a9759..9b61b8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -25,18 +25,15 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
@@ -82,9 +79,9 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
 
   /**
    * Returns the regionInfo object to use for interacting with the file system.
-   * @return An HRegionInfo object to interact with the filesystem
+   * @return A RegionInfo object to interact with the filesystem
    */
-  public static HRegionInfo getRegionInfoForFs(HRegionInfo regionInfo) {
+  public static RegionInfo getRegionInfoForFs(RegionInfo regionInfo) {
     if (regionInfo == null) {
       return null;
     }
@@ -121,11 +118,11 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
    * @throws IOException
    */
   public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
-      HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, Path path)
+      RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path)
       throws IOException {
 
     // if this is a primary region, just return the StoreFileInfo constructed from path
-    if (regionInfo.equals(regionInfoForFs)) {
+    if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) {
       return new StoreFileInfo(conf, fs, path);
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index a6d43d6..652aa2f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -32,14 +32,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-
-// imports for things that haven't moved from regionserver.wal yet.
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+// imports for things that haven't moved from regionserver.wal yet.
 
 /**
  * No-op implementation of {@link WALProvider} used when the WAL is disabled.
@@ -109,7 +109,7 @@ class DisabledWALProvider implements WALProvider {
     public void registerWALActionsListener(final WALActionsListener listener) {
       listeners.add(listener);
     }
-    
+
     @Override
     public boolean unregisterWALActionsListener(final WALActionsListener listener) {
       return listeners.remove(listener);
@@ -161,7 +161,7 @@ class DisabledWALProvider implements WALProvider {
     }
 
     @Override
-    public long append(HRegionInfo info, WALKey key, WALEdit edits, boolean inMemstore)
+    public long append(RegionInfo info, WALKey key, WALEdit edits, boolean inMemstore)
         throws IOException {
       if (!this.listeners.isEmpty()) {
         final long start = System.nanoTime();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
index 9ec58ab..886ec78 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
@@ -25,17 +25,19 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-// imports we use from yet-to-be-moved regionsever.wal
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
 import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
+// imports we use from yet-to-be-moved regionserver.wal
+
 /**
 * A Write Ahead Log (WAL) provides service for reading and writing WAL edits. This interface provides
  * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc).
@@ -66,7 +68,7 @@ public interface WAL extends Closeable, WALFileLengthProvider {
    *
    * @return If lots of logs, flush the returned regions so next time through we
    *         can clean logs. Returns null if nothing to flush. Names are actual
-   *         region names as returned by {@link HRegionInfo#getEncodedName()}
+   *         region names as returned by {@link RegionInfo#getEncodedName()}
    */
   byte[][] rollWriter() throws FailedLogCloseException, IOException;
 
@@ -82,7 +84,7 @@ public interface WAL extends Closeable, WALFileLengthProvider {
    *          been written to the current writer
    * @return If lots of logs, flush the returned regions so next time through we
    *         can clean logs. Returns null if nothing to flush. Names are actual
-   *         region names as returned by {@link HRegionInfo#getEncodedName()}
+   *         region names as returned by {@link RegionInfo#getEncodedName()}
    */
   byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException;
 
@@ -114,7 +116,7 @@ public interface WAL extends Closeable, WALFileLengthProvider {
    * @return Returns a 'transaction id' and <code>key</code> will have the region edit/sequence id
    * in it.
    */
-  long append(HRegionInfo info, WALKey key, WALEdit edits, boolean inMemstore) throws IOException;
+  long append(RegionInfo info, WALKey key, WALEdit edits, boolean inMemstore) throws IOException;
 
   /**
   * Updates the sequence number of a specific store.
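
The WAL interface itself now appends against RegionInfo. A caller-side sketch of the changed method, under the assumption that the wal, region, key, and edits arguments are supplied by the caller; the sync(txid) follow-up is the usual pattern for durable appends:

import java.io.IOException;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

public class WalAppendSketch {
  // Only the first parameter type changed: HRegionInfo -> RegionInfo.
  static long appendAndSync(WAL wal, RegionInfo region, WALKey key, WALEdit edits)
      throws IOException {
    long txid = wal.append(region, key, edits, true); // inMemstore = true, the normal write path
    wal.sync(txid);
    return txid;
  }
}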

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java
index 6a30f9c..260e6db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java
@@ -26,20 +26,20 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
 
 
 /**
@@ -192,7 +192,7 @@ public class WALEdit implements HeapSize {
     return sb.toString();
   }
 
-  public static WALEdit createFlushWALEdit(HRegionInfo hri, FlushDescriptor f) {
+  public static WALEdit createFlushWALEdit(RegionInfo hri, FlushDescriptor f) {
     KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, FLUSH,
       EnvironmentEdgeManager.currentTime(), f.toByteArray());
     return new WALEdit().add(kv);
@@ -205,7 +205,7 @@ public class WALEdit implements HeapSize {
     return null;
   }
 
-  public static WALEdit createRegionEventWALEdit(HRegionInfo hri,
+  public static WALEdit createRegionEventWALEdit(RegionInfo hri,
       RegionEventDescriptor regionEventDesc) {
     KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, REGION_EVENT,
       EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray());
@@ -224,14 +224,14 @@ public class WALEdit implements HeapSize {
    * @param c
    * @return A WALEdit that has <code>c</code> serialized as its value
    */
-  public static WALEdit createCompaction(final HRegionInfo hri, final CompactionDescriptor c) {
+  public static WALEdit createCompaction(final RegionInfo hri, final CompactionDescriptor c) {
     byte [] pbbytes = c.toByteArray();
     KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION,
       EnvironmentEdgeManager.currentTime(), pbbytes);
     return new WALEdit().add(kv); //replication scope null so that this won't be replicated
   }
 
-  public static byte[] getRowForRegion(HRegionInfo hri) {
+  public static byte[] getRowForRegion(RegionInfo hri) {
     byte[] startKey = hri.getStartKey();
     if (startKey.length == 0) {
       // empty row key is not allowed in mutations because it is both the start key and the end key
@@ -265,11 +265,11 @@ public class WALEdit implements HeapSize {
   /**
    * Create a bulk loader WALEdit
    *
-   * @param hri                The HRegionInfo for the region in which we are bulk loading
+   * @param hri                The RegionInfo for the region in which we are bulk loading
    * @param bulkLoadDescriptor The descriptor for the Bulk Loader
    * @return The WALEdit for the BulkLoad
    */
-  public static WALEdit createBulkLoadEvent(HRegionInfo hri,
+  public static WALEdit createBulkLoadEvent(RegionInfo hri,
                                             WALProtos.BulkLoadDescriptor bulkLoadDescriptor) {
     KeyValue kv = new KeyValue(getRowForRegion(hri),
         METAFAMILY,
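
The WALEdit factory methods follow the same pattern; each static creator now accepts the RegionInfo interface. A caller-side sketch using the flush variant shown above (the region and descriptor arguments are placeholders):

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

public class FlushEditSketch {
  // Wraps a flush descriptor in a WALEdit keyed to the region's row,
  // as createFlushWALEdit above now does for RegionInfo.
  static WALEdit flushEdit(RegionInfo region, FlushDescriptor desc) {
    return WALEdit.createFlushWALEdit(region, desc);
  }
}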

