hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From zhang...@apache.org
Subject [15/26] hbase git commit: HBASE-19496 Reusing the ByteBuffer in rpc layer corrupt the ServerLoad and RegionLoad
Date Fri, 22 Dec 2017 13:45:13 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 2fdb731..2a56e57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -24,23 +24,26 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 
 /**
  * This class is used for exporting current state of load on a RegionServer.
+ *
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+ *             Use {@link ServerMetrics} instead.
  */
 @InterfaceAudience.Public
-public class ServerLoad {
+@Deprecated
+public class ServerLoad implements ServerMetrics {
+  private final ServerMetrics metrics;
   private int stores = 0;
   private int storefiles = 0;
   private int storeUncompressedSizeMB = 0;
@@ -55,113 +58,200 @@ public class ServerLoad {
   private int totalStaticBloomSizeKB = 0;
   private long totalCompactingKVs = 0;
   private long currentCompactedKVs = 0;
-  private long reportTime = 0;
 
+  /**
+   * DON'T USE this constructor. It makes a fake server name.
+   */
   @InterfaceAudience.Private
   public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
+    this(ServerName.valueOf("localhost,1,1"), serverLoad);
+  }
+
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
+  @InterfaceAudience.Private
+  public ServerLoad(ServerName name, ClusterStatusProtos.ServerLoad serverLoad) {
+    this(ServerMetricsBuilder.toServerMetrics(name, serverLoad));
     this.serverLoad = serverLoad;
-    this.reportTime = System.currentTimeMillis();
-    for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
-      stores += rl.getStores();
-      storefiles += rl.getStorefiles();
-      storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
-      storefileSizeMB += rl.getStorefileSizeMB();
-      memstoreSizeMB += rl.getMemStoreSizeMB();
-      storefileIndexSizeKB += rl.getStorefileIndexSizeKB();
-      readRequestsCount += rl.getReadRequestsCount();
-      filteredReadRequestsCount += rl.getFilteredReadRequestsCount();
-      writeRequestsCount += rl.getWriteRequestsCount();
-      rootIndexSizeKB += rl.getRootIndexSizeKB();
-      totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
-      totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
-      totalCompactingKVs += rl.getTotalCompactingKVs();
-      currentCompactedKVs += rl.getCurrentCompactedKVs();
+  }
+
+  @InterfaceAudience.Private
+  public ServerLoad(ServerMetrics metrics) {
+    this.metrics = metrics;
+    this.serverLoad = ServerMetricsBuilder.toServerLoad(metrics);
+    for (RegionMetrics rl : metrics.getRegionMetrics().values()) {
+      stores += rl.getStoreCount();
+      storefiles += rl.getStoreFileCount();
+      storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+      storefileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
+      memstoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
+      readRequestsCount += rl.getReadRequestCount();
+      filteredReadRequestsCount += rl.getFilteredReadRequestCount();
+      writeRequestsCount += rl.getWriteRequestCount();
+      storefileIndexSizeKB += rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE);
+      rootIndexSizeKB += rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
+      totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+      totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+      totalCompactingKVs += rl.getCompactingCellCount();
+      currentCompactedKVs += rl.getCompactedCellCount();
     }
   }
 
-  // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
-  // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
-  /* @return the underlying ServerLoad protobuf object */
+  /**
+   * NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
+   * HBaseProtos.ServerLoad cannot be converted to an open data type (see HBASE-5967).
+   * @return the underlying ServerLoad protobuf object
+   * @deprecated DONT use this pb object since the byte array backed may be modified in rpc layer
+   */
   @InterfaceAudience.Private
+  @Deprecated
   public ClusterStatusProtos.ServerLoad obtainServerLoadPB() {
     return serverLoad;
   }
 
   protected ClusterStatusProtos.ServerLoad serverLoad;
 
-  /* @return number of requests  since last report. */
+  /**
+   * @return number of requests since last report.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
+   *             Use {@link #getRequestCountPerSecond} instead.
+   */
+  @Deprecated
   public long getNumberOfRequests() {
-    return serverLoad.getNumberOfRequests();
+    return getRequestCountPerSecond();
   }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             No flag in 2.0
+   */
+  @Deprecated
   public boolean hasNumberOfRequests() {
-    return serverLoad.hasNumberOfRequests();
+    return true;
   }
 
-  /* @return total Number of requests from the start of the region server. */
+  /**
+   * @return total number of requests from the start of the region server.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
+   *             Use {@link #getRequestCount} instead.
+   */
+  @Deprecated
   public long getTotalNumberOfRequests() {
-    return serverLoad.getTotalNumberOfRequests();
+    return getRequestCount();
   }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             No flag in 2.0
+   */
+  @Deprecated
   public boolean hasTotalNumberOfRequests() {
-    return serverLoad.hasTotalNumberOfRequests();
+    return true;
   }
 
-  /* @return the amount of used heap, in MB. */
+  /**
+   * @return the amount of used heap, in MB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
+   *             Use {@link #getUsedHeapSize} instead.
+   */
+  @Deprecated
   public int getUsedHeapMB() {
-    return serverLoad.getUsedHeapMB();
+    return (int) getUsedHeapSize().get(Size.Unit.MEGABYTE);
   }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             No flag in 2.0
+   */
+  @Deprecated
   public boolean hasUsedHeapMB() {
-    return serverLoad.hasUsedHeapMB();
+    return true;
   }
 
-  /* @return the maximum allowable size of the heap, in MB. */
+  /**
+   * @return the maximum allowable size of the heap, in MB.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getMaxHeapSize} instead.
+   */
+  @Deprecated
   public int getMaxHeapMB() {
-    return serverLoad.getMaxHeapMB();
+    return (int) getMaxHeapSize().get(Size.Unit.MEGABYTE);
   }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             No flag in 2.0
+   */
+  @Deprecated
   public boolean hasMaxHeapMB() {
-    return serverLoad.hasMaxHeapMB();
+    return true;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getStores() {
     return stores;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
+   *             Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getStorefiles() {
     return storefiles;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getStoreUncompressedSizeMB() {
     return storeUncompressedSizeMB;
   }
 
   /**
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
-   * Use {@link #getStorefileSizeMB()} instead.
+   *             Use {@link #getRegionMetrics} instead.
    */
   @Deprecated
   public int getStorefileSizeInMB() {
     return storefileSizeMB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getStorefileSizeMB() {
     return storefileSizeMB;
   }
 
   /**
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
-   * Use {@link #getMemStoreSizeMB()} instead.
+   *             Use {@link #getRegionMetrics} instead.
    */
   @Deprecated
   public int getMemstoreSizeInMB() {
     return memstoreSizeMB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getMemStoreSizeMB() {
     return memstoreSizeMB;
   }
 
   /**
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
-   * Use {@link #getStorefileIndexSizeKB()} instead.
+   *     Use {@link #getRegionMetrics} instead.
    */
   @Deprecated
   public int getStorefileIndexSizeInMB() {
@@ -169,71 +259,162 @@ public class ServerLoad {
     return (int) (getStorefileIndexSizeKB() >> 10);
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getStorefileIndexSizeKB() {
     return storefileIndexSizeKB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getReadRequestsCount() {
     return readRequestsCount;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getFilteredReadRequestsCount() {
     return filteredReadRequestsCount;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getWriteRequestsCount() {
     return writeRequestsCount;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getRootIndexSizeKB() {
     return rootIndexSizeKB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getTotalStaticIndexSizeKB() {
     return totalStaticIndexSizeKB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public int getTotalStaticBloomSizeKB() {
     return totalStaticBloomSizeKB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getTotalCompactingKVs() {
     return totalCompactingKVs;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *     Use {@link #getRegionMetrics} instead.
+   */
+  @Deprecated
   public long getCurrentCompactedKVs() {
     return currentCompactedKVs;
   }
 
   /**
-   * @return the number of regions
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionMetrics} instead.
    */
+  @Deprecated
   public int getNumberOfRegions() {
-    return serverLoad.getRegionLoadsCount();
+    return metrics.getRegionMetrics().size();
+  }
+
+  @Override
+  public ServerName getServerName() {
+    return metrics.getServerName();
   }
 
+  @Override
+  public long getRequestCountPerSecond() {
+    return metrics.getRequestCountPerSecond();
+  }
+
+  @Override
+  public long getRequestCount() {
+    return metrics.getRequestCount();
+  }
+
+  @Override
+  public Size getUsedHeapSize() {
+    return metrics.getUsedHeapSize();
+  }
+
+  @Override
+  public Size getMaxHeapSize() {
+    return metrics.getMaxHeapSize();
+  }
+
+  @Override
   public int getInfoServerPort() {
-    return serverLoad.getInfoServerPort();
+    return metrics.getInfoServerPort();
   }
 
   /**
    * Call directly from client such as hbase shell
    * @return the list of ReplicationLoadSource
    */
+  @Override
   public List<ReplicationLoadSource> getReplicationLoadSourceList() {
-    return ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList());
+    return metrics.getReplicationLoadSourceList();
   }
 
   /**
    * Call directly from client such as hbase shell
    * @return ReplicationLoadSink
    */
+  @Override
   public ReplicationLoadSink getReplicationLoadSink() {
-    if (serverLoad.hasReplLoadSink()) {
-      return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink());
-    } else {
-      return null;
-    }
+    return metrics.getReplicationLoadSink();
+  }
+
+  @Override
+  public Map<byte[], RegionMetrics> getRegionMetrics() {
+    return metrics.getRegionMetrics();
+  }
+
+  @Override
+  public List<String> getCoprocessorNames() {
+    return metrics.getCoprocessorNames();
+  }
+
+  @Override
+  public long getReportTimestamp() {
+    return metrics.getReportTimestamp();
+  }
+
+  @Override
+  public long getLastReportTimestamp() {
+    return metrics.getLastReportTimestamp();
   }
 
   /**
@@ -243,8 +424,11 @@ public class ServerLoad {
    * interim, until we can figure out how to make rebalancing use all the info
    * available, we're just going to make load purely the number of regions.
    *
-   * @return load factor for this server
+   * @return load factor for this server.
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getNumberOfRegions} instead.
    */
+  @Deprecated
   public int getLoad() {
     // See above comment
     // int load = numberOfRequests == 0 ? 1 : numberOfRequests;
@@ -254,53 +438,43 @@ public class ServerLoad {
   }
 
   /**
-   * @return region load metrics
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRegionMetrics} instead.
    */
+  @Deprecated
   public Map<byte[], RegionLoad> getRegionsLoad() {
-    Map<byte[], RegionLoad> regionLoads =
-      new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
-      RegionLoad regionLoad = new RegionLoad(rl);
-      regionLoads.put(regionLoad.getName(), regionLoad);
-    }
-    return regionLoads;
+    return getRegionMetrics().entrySet().stream()
+        .collect(Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()),
+          (v1, v2) -> {
+            throw new RuntimeException("key collisions?");
+          }, () -> new TreeMap(Bytes.BYTES_COMPARATOR)));
   }
 
   /**
-   * Return the RegionServer-level coprocessors
-   * @return string array of loaded RegionServer-level coprocessors
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getCoprocessorNames} instead.
    */
+  @Deprecated
   public String[] getRegionServerCoprocessors() {
-    List<Coprocessor> list = obtainServerLoadPB().getCoprocessorsList();
-    String [] ret = new String[list.size()];
-    int i = 0;
-    for (Coprocessor elem : list) {
-      ret[i++] = elem.getName();
-    }
-
-    return ret;
+    return getCoprocessorNames().toArray(new String[getCoprocessorNames().size()]);
   }
 
   /**
-   * Return the RegionServer-level and Region-level coprocessors
-   * @return string array of loaded RegionServer-level and
-   *         Region-level coprocessors
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getCoprocessorNames} instead.
    */
+  @Deprecated
   public String[] getRsCoprocessors() {
-    // Need a set to remove duplicates, but since generated Coprocessor class
-    // is not Comparable, make it a Set<String> instead of Set<Coprocessor>
-    TreeSet<String> coprocessSet = new TreeSet<>();
-    for (Coprocessor coprocessor : obtainServerLoadPB().getCoprocessorsList()) {
-      coprocessSet.add(coprocessor.getName());
-    }
-    return coprocessSet.toArray(new String[coprocessSet.size()]);
+    return getRegionServerCoprocessors();
   }
 
   /**
-   * @return number of requests per second received since the last report
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getRequestCountPerSecond} instead.
    */
+  @Deprecated
   public double getRequestsPerSecond() {
-    return getNumberOfRequests();
+    return getRequestCountPerSecond();
   }
 
   /**
@@ -308,70 +482,73 @@ public class ServerLoad {
    */
   @Override
   public String toString() {
-     StringBuilder sb =
-        Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond",
-          Double.valueOf(getRequestsPerSecond()));
+    StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond",
+      Double.valueOf(getRequestsPerSecond()));
     Strings.appendKeyValue(sb, "numberOfOnlineRegions", Integer.valueOf(getNumberOfRegions()));
-    sb = Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB()));
-    sb = Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB()));
-    sb = Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores));
-    sb = Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles));
-    sb =
-        Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
-          Integer.valueOf(this.storeUncompressedSizeMB));
-    sb = Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB));
+    Strings.appendKeyValue(sb, "usedHeapMB", Integer.valueOf(this.getUsedHeapMB()));
+    Strings.appendKeyValue(sb, "maxHeapMB", Integer.valueOf(getMaxHeapMB()));
+    Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores));
+    Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles));
+    Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
+        Integer.valueOf(this.storeUncompressedSizeMB));
+    Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB));
     if (this.storeUncompressedSizeMB != 0) {
-      sb =
-          Strings.appendKeyValue(
-            sb,
-            "compressionRatio",
-            String.format("%.4f", (float) this.storefileSizeMB
-                / (float) this.storeUncompressedSizeMB));
+      Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f",
+          (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB));
     }
-    sb = Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB));
-    sb =
-        Strings.appendKeyValue(sb, "storefileIndexSizeKB",
-          Long.valueOf(this.storefileIndexSizeKB));
-    sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
-    sb = Strings.appendKeyValue(sb, "filteredReadRequestsCount",
-      Long.valueOf(this.filteredReadRequestsCount));
-    sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount));
-    sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB));
-    sb =
-        Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
-          Integer.valueOf(this.totalStaticIndexSizeKB));
-    sb =
-        Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
-          Integer.valueOf(this.totalStaticBloomSizeKB));
-    sb = Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs));
-    sb = Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs));
+    Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB));
+    Strings.appendKeyValue(sb, "storefileIndexSizeKB",
+        Long.valueOf(this.storefileIndexSizeKB));
+    Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
+    Strings.appendKeyValue(sb, "filteredReadRequestsCount",
+        Long.valueOf(this.filteredReadRequestsCount));
+    Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount));
+    Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB));
+    Strings.appendKeyValue(sb, "totalStaticIndexSizeKB",
+        Integer.valueOf(this.totalStaticIndexSizeKB));
+    Strings.appendKeyValue(sb, "totalStaticBloomSizeKB",
+        Integer.valueOf(this.totalStaticBloomSizeKB));
+    Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs));
+    Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs));
     float compactionProgressPct = Float.NaN;
     if (this.totalCompactingKVs > 0) {
       compactionProgressPct =
           Float.valueOf((float) this.currentCompactedKVs / this.totalCompactingKVs);
     }
-    sb = Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct);
+    Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct);
 
     String[] coprocessorStrings = getRsCoprocessors();
     if (coprocessorStrings != null) {
-      sb = Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings));
+      Strings.appendKeyValue(sb, "coprocessors", Arrays.toString(coprocessorStrings));
     }
     return sb.toString();
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link ServerMetricsBuilder#of(ServerName)} instead.
+   */
+  @Deprecated
   public static final ServerLoad EMPTY_SERVERLOAD =
-    new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder().build());
+      new ServerLoad(ServerName.valueOf("localhost,1,1"),
+          ClusterStatusProtos.ServerLoad.newBuilder().build());
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *             Use {@link #getReportTimestamp} instead.
+   */
+  @Deprecated
   public long getReportTime() {
-    return reportTime;
+    return getReportTimestamp();
   }
 
   @Override
   public int hashCode() {
-    return Objects.hashCode(stores, storefiles, storeUncompressedSizeMB,
-      storefileSizeMB, memstoreSizeMB, storefileIndexSizeKB, readRequestsCount,
-      filteredReadRequestsCount, writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
-      totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs);
+    return Objects
+        .hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, memstoreSizeMB,
+            storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, writeRequestsCount,
+            rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs,
+            currentCompactedKVs);
   }
 
   @Override
@@ -379,16 +556,17 @@ public class ServerLoad {
     if (other == this) return true;
     if (other instanceof ServerLoad) {
       ServerLoad sl = ((ServerLoad) other);
-      return stores == sl.stores && storefiles == sl.storefiles &&
-        storeUncompressedSizeMB == sl.storeUncompressedSizeMB &&
-        storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB &&
-        storefileIndexSizeKB == sl.storefileIndexSizeKB && readRequestsCount == sl.readRequestsCount &&
-        filteredReadRequestsCount == sl.filteredReadRequestsCount &&
-        writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB &&
-        totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB &&
-        totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB &&
-        totalCompactingKVs == sl.totalCompactingKVs &&
-        currentCompactedKVs == sl.currentCompactedKVs;
+      return stores == sl.stores && storefiles == sl.storefiles
+          && storeUncompressedSizeMB == sl.storeUncompressedSizeMB
+          && storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB
+          && storefileIndexSizeKB == sl.storefileIndexSizeKB
+          && readRequestsCount == sl.readRequestsCount
+          && filteredReadRequestsCount == sl.filteredReadRequestsCount
+          && writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB
+          && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB
+          && totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB
+          && totalCompactingKVs == sl.totalCompactingKVs
+          && currentCompactedKVs == sl.currentCompactedKVs;
     }
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
new file mode 100644
index 0000000..1ef3126
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -0,0 +1,90 @@
+/**
+ * Copyright The Apache Software Foundation
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This class is used for exporting current state of load on a RegionServer.
+ */
+@InterfaceAudience.Public
+public interface ServerMetrics {
+
+  ServerName getServerName();
+  /**
+   * @return the number of requests per second.
+   */
+  long getRequestCountPerSecond();
+
+  /**
+   * @return total number of requests from the start of the region server.
+   */
+  long getRequestCount();
+
+  /**
+   * @return the amount of used heap
+   */
+  Size getUsedHeapSize();
+
+  /**
+   * @return the maximum allowable size of the heap
+   */
+  Size getMaxHeapSize();
+
+  int getInfoServerPort();
+
+  /**
+   * Call directly from client such as hbase shell
+   * @return the list of ReplicationLoadSource
+   */
+  List<ReplicationLoadSource> getReplicationLoadSourceList();
+
+  /**
+   * Call directly from client such as hbase shell
+   * @return ReplicationLoadSink
+   */
+  @Nullable
+  ReplicationLoadSink getReplicationLoadSink();
+
+  /**
+   * @return region load metrics
+   */
+  Map<byte[], RegionMetrics> getRegionMetrics();
+
+  /**
+   * Return the RegionServer-level and Region-level coprocessors
+   * @return string list of loaded RegionServer-level and Region-level coprocessors
+   */
+  List<String> getCoprocessorNames();
+
+  /**
+   * @return the timestamp (server side) of generating this metrics
+   */
+  long getReportTimestamp();
+
+  /**
+   * @return the last timestamp (server side) of generating this metrics
+   */
+  long getLastReportTimestamp();
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
new file mode 100644
index 0000000..e501c43
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -0,0 +1,352 @@
+/**
+ * Copyright The Apache Software Foundation
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+
+@InterfaceAudience.Private
+public final class ServerMetricsBuilder {
+
+  /**
+   * @param sn the server name
+   * @return a metrics instance with every field left at its default
+   */
+  public static ServerMetrics of(ServerName sn) {
+    return newBuilder(sn).build();
+  }
+
+  public static ServerMetrics toServerMetrics(ClusterStatusProtos.LiveServerInfo serverInfo) {
+    return toServerMetrics(ProtobufUtil.toServerName(serverInfo.getServer()),
+        serverInfo.getServerLoad());
+  }
+
+  /**
+   * Deserializes the protobuf ServerLoad into a {@link ServerMetrics}.
+   * @param serverName the server the load was reported for
+   * @param serverLoadPB the protobuf-encoded server load
+   * @return the equivalent {@link ServerMetrics}
+   */
+  public static ServerMetrics toServerMetrics(ServerName serverName,
+      ClusterStatusProtos.ServerLoad serverLoadPB) {
+    return ServerMetricsBuilder.newBuilder(serverName)
+        .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests())
+        .setRequestCount(serverLoadPB.getTotalNumberOfRequests())
+        .setInfoServerPort(serverLoadPB.getInfoServerPort())
+        .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE))
+        .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE))
+        .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream()
+            .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList()))
+        .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream()
+            .map(RegionMetricsBuilder::toRegionMetrics)
+            .collect(Collectors.toList()))
+        .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream()
+            .map(ProtobufUtil::toReplicationLoadSource)
+            .collect(Collectors.toList()))
+        // the sink is optional in the protobuf message, hence the null fallback
+        .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() ?
+            ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) : null)
+        .setReportTimestamp(serverLoadPB.getReportEndTime())
+        .setLastReportTimestamp(serverLoadPB.getReportStartTime())
+        .build();
+  }
+
+  public static List<HBaseProtos.Coprocessor> toCoprocessor(List<String> names) {
+    return names.stream()
+        .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
+        .collect(Collectors.toList());
+  }
+
+  /**
+   * Serializes a {@link ServerMetrics} back into its protobuf ServerLoad form.
+   * The optional replication sink is only set when present.
+   */
+  public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
+    ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder()
+        .setNumberOfRequests(metrics.getRequestCountPerSecond())
+        .setTotalNumberOfRequests(metrics.getRequestCount())
+        .setInfoServerPort(metrics.getInfoServerPort())
+        .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE))
+        .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE))
+        .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames()))
+        .addAllRegionLoads(metrics.getRegionMetrics().values().stream()
+            .map(RegionMetricsBuilder::toRegionLoad)
+            .collect(Collectors.toList()))
+        .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream()
+            .map(ProtobufUtil::toReplicationLoadSource)
+            .collect(Collectors.toList()))
+        .setReportStartTime(metrics.getLastReportTimestamp())
+        .setReportEndTime(metrics.getReportTimestamp());
+    if (metrics.getReplicationLoadSink() != null) {
+      builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink(
+          metrics.getReplicationLoadSink()));
+    }
+    return builder.build();
+  }
+
+  public static ServerMetricsBuilder newBuilder(ServerName sn) {
+    return new ServerMetricsBuilder(sn);
+  }
+
+  private final ServerName serverName;
+  private long requestCountPerSecond;
+  private long requestCount;
+  private Size usedHeapSize = Size.ZERO;
+  private Size maxHeapSize = Size.ZERO;
+  private int infoServerPort;
+  private List<ReplicationLoadSource> sources = Collections.emptyList();
+  @Nullable
+  private ReplicationLoadSink sink = null;
+  // sorted by region name bytes so iteration order is deterministic
+  private final Map<byte[], RegionMetrics> regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+  private List<String> coprocessorNames = Collections.emptyList();
+  private long reportTimestamp = System.currentTimeMillis();
+  private long lastReportTimestamp = 0;
+
+  private ServerMetricsBuilder(ServerName serverName) {
+    this.serverName = serverName;
+  }
+
+  public ServerMetricsBuilder setRequestCountPerSecond(long value) {
+    this.requestCountPerSecond = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setRequestCount(long value) {
+    this.requestCount = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setUsedHeapSize(Size value) {
+    this.usedHeapSize = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setMaxHeapSize(Size value) {
+    this.maxHeapSize = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setInfoServerPort(int value) {
+    this.infoServerPort = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setReplicationLoadSources(List<ReplicationLoadSource> value) {
+    this.sources = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setReplicationLoadSink(ReplicationLoadSink value) {
+    this.sink = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setRegionMetrics(List<RegionMetrics> value) {
+    value.forEach(v -> this.regionStatus.put(v.getRegionName(), v));
+    return this;
+  }
+
+  public ServerMetricsBuilder setCoprocessorNames(List<String> value) {
+    this.coprocessorNames = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setReportTimestamp(long value) {
+    this.reportTimestamp = value;
+    return this;
+  }
+
+  public ServerMetricsBuilder setLastReportTimestamp(long value) {
+    this.lastReportTimestamp = value;
+    return this;
+  }
+
+  public ServerMetrics build() {
+    return new ServerMetricsImpl(
+        serverName,
+        requestCountPerSecond,
+        requestCount,
+        usedHeapSize,
+        maxHeapSize,
+        infoServerPort,
+        sources,
+        sink,
+        regionStatus,
+        coprocessorNames,
+        reportTimestamp,
+        lastReportTimestamp);
+  }
+
+  /**
+   * Immutable {@link ServerMetrics} implementation produced by the enclosing builder.
+   * All collection accessors return unmodifiable views.
+   */
+  private static class ServerMetricsImpl implements ServerMetrics {
+    private final ServerName serverName;
+    private final long requestCountPerSecond;
+    private final long requestCount;
+    private final Size usedHeapSize;
+    private final Size maxHeapSize;
+    private final int infoServerPort;
+    private final List<ReplicationLoadSource> sources;
+    @Nullable
+    private final ReplicationLoadSink sink;
+    private final Map<byte[], RegionMetrics> regionStatus;
+    private final List<String> coprocessorNames;
+    private final long reportTimestamp;
+    private final long lastReportTimestamp;
+
+    ServerMetricsImpl(ServerName serverName, long requestCountPerSecond, long requestCount,
+      Size usedHeapSize, Size maxHeapSize, int infoServerPort, List<ReplicationLoadSource> sources,
+      ReplicationLoadSink sink, Map<byte[], RegionMetrics> regionStatus,
+      List<String> coprocessorNames, long reportTimestamp, long lastReportTimestamp) {
+      this.serverName = Preconditions.checkNotNull(serverName);
+      this.requestCountPerSecond = requestCountPerSecond;
+      this.requestCount = requestCount;
+      this.usedHeapSize = Preconditions.checkNotNull(usedHeapSize);
+      this.maxHeapSize = Preconditions.checkNotNull(maxHeapSize);
+      this.infoServerPort = infoServerPort;
+      this.sources = Preconditions.checkNotNull(sources);
+      this.sink = sink;
+      this.regionStatus = Preconditions.checkNotNull(regionStatus);
+      this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames);
+      this.reportTimestamp = reportTimestamp;
+      this.lastReportTimestamp = lastReportTimestamp;
+    }
+
+    @Override
+    public ServerName getServerName() {
+      return serverName;
+    }
+
+    @Override
+    public long getRequestCountPerSecond() {
+      return requestCountPerSecond;
+    }
+
+    @Override
+    public long getRequestCount() {
+      return requestCount;
+    }
+
+    @Override
+    public Size getUsedHeapSize() {
+      return usedHeapSize;
+    }
+
+    @Override
+    public Size getMaxHeapSize() {
+      return maxHeapSize;
+    }
+
+    @Override
+    public int getInfoServerPort() {
+      return infoServerPort;
+    }
+
+    @Override
+    public List<ReplicationLoadSource> getReplicationLoadSourceList() {
+      return Collections.unmodifiableList(sources);
+    }
+
+    @Override
+    public ReplicationLoadSink getReplicationLoadSink() {
+      return sink;
+    }
+
+    @Override
+    public Map<byte[], RegionMetrics> getRegionMetrics() {
+      return Collections.unmodifiableMap(regionStatus);
+    }
+
+    @Override
+    public List<String> getCoprocessorNames() {
+      return Collections.unmodifiableList(coprocessorNames);
+    }
+
+    @Override
+    public long getReportTimestamp() {
+      return reportTimestamp;
+    }
+
+    @Override
+    public long getLastReportTimestamp() {
+      return lastReportTimestamp;
+    }
+
+    @Override
+    public String toString() {
+      // Aggregate the per-region metrics into server-wide totals. Each local
+      // variable's suffix (MB/KB) names the unit it is accumulated in, and must
+      // match the unit requested from Size#get below as well as the key it is
+      // printed under.
+      int storeCount = 0;
+      int storeFileCount = 0;
+      long uncompressedStoreFileSizeMB = 0;
+      long storeFileSizeMB = 0;
+      long memStoreSizeMB = 0;
+      long storefileIndexSizeKB = 0;
+      long rootLevelIndexSizeKB = 0;
+      long readRequestsCount = 0;
+      long writeRequestsCount = 0;
+      long filteredReadRequestsCount = 0;
+      long bloomFilterSizeKB = 0;
+      long compactingCellCount = 0;
+      long compactedCellCount = 0;
+      for (RegionMetrics r : getRegionMetrics().values()) {
+        storeCount += r.getStoreCount();
+        storeFileCount += r.getStoreFileCount();
+        uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+        storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE);
+        memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE);
+        storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+        readRequestsCount += r.getReadRequestCount();
+        writeRequestsCount += r.getWriteRequestCount();
+        filteredReadRequestsCount += r.getFilteredReadRequestCount();
+        rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
+        // accumulate in KB to match the "totalStaticBloomSizeKB" key printed below
+        bloomFilterSizeKB += r.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+        compactedCellCount += r.getCompactedCellCount();
+        compactingCellCount += r.getCompactingCellCount();
+      }
+      StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond",
+            Double.valueOf(getRequestCountPerSecond()));
+      Strings.appendKeyValue(sb, "numberOfOnlineRegions",
+          Integer.valueOf(getRegionMetrics().size()));
+      Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize());
+      Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize());
+      Strings.appendKeyValue(sb, "numberOfStores", storeCount);
+      Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount);
+      Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB);
+      Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB);
+      if (uncompressedStoreFileSizeMB != 0) {
+        Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f",
+            (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB));
+      }
+      Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB);
+      Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount);
+      Strings.appendKeyValue(sb, "filteredReadRequestsCount", filteredReadRequestsCount);
+      Strings.appendKeyValue(sb, "writeRequestsCount", writeRequestsCount);
+      Strings.appendKeyValue(sb, "rootIndexSizeKB", rootLevelIndexSizeKB);
+      Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", storefileIndexSizeKB);
+      Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", bloomFilterSizeKB);
+      Strings.appendKeyValue(sb, "totalCompactingKVs", compactingCellCount);
+      Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount);
+      // NaN when no compaction has been reported; callers see "NaN" in that case
+      float compactionProgressPct = Float.NaN;
+      if (compactingCellCount > 0) {
+        compactionProgressPct = (float) compactedCellCount / compactingCellCount;
+      }
+      Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct);
+      Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames());
+      return sb.toString();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java
new file mode 100644
index 0000000..87d7554
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java
@@ -0,0 +1,158 @@
+/**
+ * Copyright The Apache Software Foundation
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.math.BigDecimal;
+import java.util.Objects;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+
+/**
+ * It is used to represent the size with different units.
+ * This class doesn't serve for the precise computation.
+ * <p>
+ * Note on equality: {@link #equals(Object)} compares the denoted quantity, so
+ * sizes expressed in different units compare equal when they describe the same
+ * number of bytes (e.g. 1 MB equals 1024 KB), and {@link #hashCode()} is
+ * consistent with that.
+ */
+@InterfaceAudience.Public
+public final class Size implements Comparable<Size> {
+  public static final Size ZERO = new Size(0, Unit.KILOBYTE);
+  private static final BigDecimal SCALE_BASE = BigDecimal.valueOf(1024D);
+
+  public enum Unit {
+    // keep the room to add more units for HBase 10.x
+    PETABYTE(100, "PB"),
+    TERABYTE(99, "TB"),
+    GIGABYTE(98, "GB"),
+    MEGABYTE(97, "MB"),
+    KILOBYTE(96, "KB"),
+    BYTE(95, "B");
+    // larger order means larger unit; adjacent orders differ by a factor of 1024
+    private final int orderOfSize;
+    private final String simpleName;
+
+    Unit(int orderOfSize, String simpleName) {
+      this.orderOfSize = orderOfSize;
+      this.simpleName = simpleName;
+    }
+
+    public int getOrderOfSize() {
+      return orderOfSize;
+    }
+
+    public String getSimpleName() {
+      return simpleName;
+    }
+  }
+
+  private final double value;
+  private final Unit unit;
+
+  /**
+   * @param value the non-negative magnitude
+   * @param unit the unit the magnitude is expressed in
+   * @throws IllegalArgumentException if value is negative
+   * @throws NullPointerException if unit is null
+   */
+  public Size(double value, Unit unit) {
+    if (value < 0) {
+      throw new IllegalArgumentException("The value:" + value + " can't be negative");
+    }
+    this.value = value;
+    this.unit = Preconditions.checkNotNull(unit);
+  }
+
+  /**
+   * @return size unit
+   */
+  public Unit getUnit() {
+    return unit;
+  }
+
+  /**
+   * @return the magnitude truncated to a long, in this size's own unit
+   */
+  public long getLongValue() {
+    return (long) value;
+  }
+
+  /**
+   * @return the magnitude in this size's own unit
+   */
+  public double get() {
+    return value;
+  }
+
+  /**
+   * get the value which is converted to specified unit.
+   *
+   * @param unit size unit
+   * @return the converted value
+   */
+  public double get(Unit unit) {
+    if (value == 0) {
+      return value;
+    }
+    int diff = this.unit.getOrderOfSize() - unit.getOrderOfSize();
+    if (diff == 0) {
+      return value;
+    }
+
+    // scale by 1024 per order of difference; division by 1024 (a power of two)
+    // always terminates, so BigDecimal#divide cannot throw here
+    BigDecimal rval = BigDecimal.valueOf(value);
+    for (int i = 0; i != Math.abs(diff); ++i) {
+      rval = diff > 0 ? rval.multiply(SCALE_BASE) : rval.divide(SCALE_BASE);
+    }
+    return rval.doubleValue();
+  }
+
+  @Override
+  public int compareTo(Size other) {
+    int diff = unit.getOrderOfSize() - other.unit.getOrderOfSize();
+    if (diff == 0) {
+      return Double.compare(value, other.value);
+    }
+
+    // bring both operands to the smaller unit (multiplying only) to avoid
+    // any precision loss before comparing
+    BigDecimal thisValue = BigDecimal.valueOf(value);
+    BigDecimal otherValue = BigDecimal.valueOf(other.value);
+    if (diff > 0) {
+      for (int i = 0; i != Math.abs(diff); ++i) {
+        thisValue = thisValue.multiply(SCALE_BASE);
+      }
+    } else {
+      for (int i = 0; i != Math.abs(diff); ++i) {
+        otherValue = otherValue.multiply(SCALE_BASE);
+      }
+    }
+    return thisValue.compareTo(otherValue);
+  }
+
+  @Override
+  public String toString() {
+    return value + unit.getSimpleName();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof Size) {
+      return compareTo((Size)obj) == 0;
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    // equals() is unit-insensitive (1 MB equals 1024 KB), so the hash must be
+    // computed from the canonical byte value rather than the (value, unit)
+    // pair, otherwise equal objects could have different hash codes.
+    return Double.hashCode(get(Unit.BYTE));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 0567e8e..de022d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -28,12 +28,11 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.Future;
 import java.util.regex.Pattern;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -60,6 +58,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 2ae51ac..c01d5fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import com.google.protobuf.RpcChannel;
-
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -29,9 +28,8 @@ import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.Function;
 import java.util.regex.Pattern;
-
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 0f0679d..09fdeff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import com.google.protobuf.RpcChannel;
-
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
@@ -29,9 +28,8 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.function.Function;
 import java.util.regex.Pattern;
-
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index 5ff1b67..0c53ee4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -264,7 +264,7 @@ class ClusterStatusListener implements Closeable {
         ByteBufInputStream bis = new ByteBufInputStream(dp.content());
         try {
           ClusterStatusProtos.ClusterStatus csp = ClusterStatusProtos.ClusterStatus.parseFrom(bis);
-          ClusterStatus ncs = ProtobufUtil.convert(csp);
+          ClusterStatus ncs = ProtobufUtil.toClusterStatus(csp);
           receive(ncs);
         } finally {
           bis.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 600ee69..bbcc825 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -45,13 +44,12 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CacheEvictionStats;
 import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -103,6 +101,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -2086,7 +2085,7 @@ public class HBaseAdmin implements Admin {
       @Override
       protected ClusterStatus rpcCall() throws Exception {
         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options);
-        return ProtobufUtil.convert(
+        return ProtobufUtil.toClusterStatus(
           master.getClusterStatus(getRpcController(), req).getClusterStatus());
       }
     });

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index bb427b1..5e1c654 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -23,7 +23,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcChannel;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -43,12 +42,11 @@ import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -87,6 +85,7 @@ import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
 import org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
 import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
@@ -2617,7 +2616,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
               .<GetClusterStatusRequest, GetClusterStatusResponse, ClusterStatus> call(controller,
                 stub, RequestConverter.buildGetClusterStatusRequest(options),
                 (s, c, req, done) -> s.getClusterStatus(c, req, done),
-                resp -> ProtobufUtil.convert(resp.getClusterStatus()))).call();
+                resp -> ProtobufUtil.toClusterStatus(resp.getClusterStatus()))).call();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
index 7ae0e0f..de4d4ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
@@ -15,11 +15,13 @@ import org.apache.yetus.audience.InterfaceAudience;
 /**
  * A HBase ReplicationLoad to present MetricsSink information
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
 public class ReplicationLoadSink {
-  private long ageOfLastAppliedOp;
-  private long timeStampsOfLastAppliedOp;
+  private final long ageOfLastAppliedOp;
+  private final long timeStampsOfLastAppliedOp;
 
+  // TODO: add the builder for this class
+  @InterfaceAudience.Private
   public ReplicationLoadSink(long age, long timeStamp) {
     this.ageOfLastAppliedOp = age;
     this.timeStampsOfLastAppliedOp = timeStamp;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
index 0b02805..845320c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java
@@ -15,14 +15,16 @@ import org.apache.yetus.audience.InterfaceAudience;
 /**
  * A HBase ReplicationLoad to present MetricsSource information
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
 public class ReplicationLoadSource {
-  private String peerID;
-  private long ageOfLastShippedOp;
-  private int sizeOfLogQueue;
-  private long timeStampOfLastShippedOp;
-  private long replicationLag;
-
+  private final String peerID;
+  private final long ageOfLastShippedOp;
+  private final int sizeOfLogQueue;
+  private final long timeStampOfLastShippedOp;
+  private final long replicationLag;
+
+  // TODO: add the builder for this class
+  @InterfaceAudience.Private
   public ReplicationLoadSource(String id, long age, int size, long timeStamp, long lag) {
     this.peerID = id;
     this.ageOfLastShippedOp = age;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index c9ea5a5..17b1141 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -25,7 +25,6 @@ import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -49,9 +48,8 @@ import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
@@ -60,7 +58,6 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
@@ -92,7 +89,6 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
@@ -150,11 +146,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos.HBaseVersionFileContent;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
@@ -2731,23 +2724,14 @@ public final class ProtobufUtil {
   }
 
   public static ReplicationLoadSink toReplicationLoadSink(
-      ClusterStatusProtos.ReplicationLoadSink cls) {
-    return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp());
+      ClusterStatusProtos.ReplicationLoadSink rls) {
+    return new ReplicationLoadSink(rls.getAgeOfLastAppliedOp(), rls.getTimeStampsOfLastAppliedOp());
   }
 
   public static ReplicationLoadSource toReplicationLoadSource(
-      ClusterStatusProtos.ReplicationLoadSource cls) {
-    return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(),
-        cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag());
-  }
-
-  public static List<ReplicationLoadSource> toReplicationLoadSourceList(
-      List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
-    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<>(clsList.size());
-    for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
-      rlsList.add(toReplicationLoadSource(cls));
-    }
-    return rlsList;
+      ClusterStatusProtos.ReplicationLoadSource rls) {
+    return new ReplicationLoadSource(rls.getPeerID(), rls.getAgeOfLastShippedOp(),
+      rls.getSizeOfLogQueue(), rls.getTimeStampOfLastShippedOp(), rls.getReplicationLag());
   }
 
   /**
@@ -2991,213 +2975,8 @@ public final class ProtobufUtil {
    * @param proto the protobuf ClusterStatus
    * @return the converted ClusterStatus
    */
-  public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
-    ClusterStatus.Builder builder = ClusterStatus.newBuilder();
-
-    Map<ServerName, ServerLoad> servers = null;
-    servers = new HashMap<>(proto.getLiveServersList().size());
-    for (LiveServerInfo lsi : proto.getLiveServersList()) {
-      servers.put(ProtobufUtil.toServerName(
-          lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
-    }
-
-    List<ServerName> deadServers = new ArrayList<>(proto.getDeadServersList().size());
-    for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
-      deadServers.add(ProtobufUtil.toServerName(sn));
-    }
-
-    List<ServerName> backupMasters = new ArrayList<>(proto.getBackupMastersList().size());
-    for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
-      backupMasters.add(ProtobufUtil.toServerName(sn));
-    }
-
-    List<RegionState> rit =
-      new ArrayList<>(proto.getRegionsInTransitionList().size());
-    for (RegionInTransition region : proto.getRegionsInTransitionList()) {
-      RegionState value = RegionState.convert(region.getRegionState());
-      rit.add(value);
-    }
-
-    String[] masterCoprocessors = null;
-    final int numMasterCoprocessors = proto.getMasterCoprocessorsCount();
-    masterCoprocessors = new String[numMasterCoprocessors];
-    for (int i = 0; i < numMasterCoprocessors; i++) {
-      masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
-    }
-
-    String clusterId = null;
-    if (proto.hasClusterId()) {
-      clusterId = ClusterId.convert(proto.getClusterId()).toString();
-    }
-
-    String hbaseVersion = null;
-    if (proto.hasHbaseVersion()) {
-      hbaseVersion = proto.getHbaseVersion().getVersion();
-    }
-
-    ServerName master = null;
-    if (proto.hasMaster()) {
-      master = ProtobufUtil.toServerName(proto.getMaster());
-    }
-
-    Boolean balancerOn = null;
-    if (proto.hasBalancerOn()) {
-      balancerOn = proto.getBalancerOn();
-    }
-
-    builder.setHBaseVersion(hbaseVersion)
-           .setClusterId(clusterId)
-           .setLiveServers(servers)
-           .setDeadServers(deadServers)
-           .setMaster(master)
-           .setBackupMasters(backupMasters)
-           .setRegionState(rit)
-           .setMasterCoprocessors(masterCoprocessors)
-           .setBalancerOn(balancerOn);
-    if (proto.hasMasterInfoPort()) {
-      builder.setMasterInfoPort(proto.getMasterInfoPort());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Convert ClusterStatusProtos.Option to ClusterStatus.Option
-   * @param option a ClusterStatusProtos.Option
-   * @return converted ClusterStatus.Option
-   */
-  public static ClusterStatus.Option toOption(ClusterStatusProtos.Option option) {
-    switch (option) {
-      case HBASE_VERSION: return ClusterStatus.Option.HBASE_VERSION;
-      case LIVE_SERVERS: return ClusterStatus.Option.LIVE_SERVERS;
-      case DEAD_SERVERS: return ClusterStatus.Option.DEAD_SERVERS;
-      case REGIONS_IN_TRANSITION: return ClusterStatus.Option.REGIONS_IN_TRANSITION;
-      case CLUSTER_ID: return ClusterStatus.Option.CLUSTER_ID;
-      case MASTER_COPROCESSORS: return ClusterStatus.Option.MASTER_COPROCESSORS;
-      case MASTER: return ClusterStatus.Option.MASTER;
-      case BACKUP_MASTERS: return ClusterStatus.Option.BACKUP_MASTERS;
-      case BALANCER_ON: return ClusterStatus.Option.BALANCER_ON;
-      case MASTER_INFO_PORT: return ClusterStatus.Option.MASTER_INFO_PORT;
-      // should not reach here
-      default: throw new IllegalArgumentException("Invalid option: " + option);
-    }
-  }
-
-  /**
-   * Convert ClusterStatus.Option to ClusterStatusProtos.Option
-   * @param option a ClusterStatus.Option
-   * @return converted ClusterStatusProtos.Option
-   */
-  public static ClusterStatusProtos.Option toOption(ClusterStatus.Option option) {
-    switch (option) {
-      case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION;
-      case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS;
-      case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS;
-      case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION;
-      case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID;
-      case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS;
-      case MASTER: return ClusterStatusProtos.Option.MASTER;
-      case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
-      case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
-      case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
-      // should not reach here
-      default: throw new IllegalArgumentException("Invalid option: " + option);
-    }
-  }
-
-  /**
-   * Convert a list of ClusterStatusProtos.Option to an enum set of ClusterStatus.Option
-   * @param options
-   * @return an enum set of ClusterStatus.Option
-   */
-  public static EnumSet<Option> toOptions(List<ClusterStatusProtos.Option> options) {
-    EnumSet<Option> result = EnumSet.noneOf(Option.class);
-    for (ClusterStatusProtos.Option opt : options) {
-      result.add(toOption(opt));
-    }
-    return result;
-  }
-
-  /**
-   * Convert an enum set of ClusterStatus.Option to a list of ClusterStatusProtos.Option
-   * @param options
-   * @return a list of ClusterStatusProtos.Option
-   */
-  public static List<ClusterStatusProtos.Option> toOptions(EnumSet<Option> options) {
-    List<ClusterStatusProtos.Option> result = new ArrayList<>(options.size());
-    for (ClusterStatus.Option opt : options) {
-      result.add(toOption(opt));
-    }
-    return result;
-  }
-
-  /**
-   * Convert a ClusterStatus to a protobuf ClusterStatus
-   *
-   * @return the protobuf ClusterStatus
-   */
-  public static ClusterStatusProtos.ClusterStatus convert(ClusterStatus status) {
-    ClusterStatusProtos.ClusterStatus.Builder builder =
-        ClusterStatusProtos.ClusterStatus.newBuilder();
-    if (status.getHBaseVersion() != null) {
-       builder.setHbaseVersion(
-         HBaseVersionFileContent.newBuilder()
-                                .setVersion(status.getHBaseVersion()));
-    }
-
-    if (status.getServers() != null) {
-      for (ServerName serverName : status.getServers()) {
-        LiveServerInfo.Builder lsi =
-            LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(serverName));
-        lsi.setServerLoad(status.getLoad(serverName).obtainServerLoadPB());
-        builder.addLiveServers(lsi.build());
-      }
-    }
-
-    if (status.getDeadServerNames() != null) {
-      for (ServerName deadServer : status.getDeadServerNames()) {
-        builder.addDeadServers(ProtobufUtil.toServerName(deadServer));
-      }
-    }
-
-    if (status.getRegionsInTransition() != null) {
-      for (RegionState rit : status.getRegionsInTransition()) {
-        ClusterStatusProtos.RegionState rs = rit.convert();
-        RegionSpecifier.Builder spec =
-            RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME);
-        spec.setValue(UnsafeByteOperations.unsafeWrap(rit.getRegion().getRegionName()));
-
-        RegionInTransition pbRIT =
-            RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build();
-        builder.addRegionsInTransition(pbRIT);
-      }
-    }
-
-    if (status.getClusterId() != null) {
-      builder.setClusterId(new ClusterId(status.getClusterId()).convert());
-    }
-
-    if (status.getMasterCoprocessors() != null) {
-      for (String coprocessor : status.getMasterCoprocessors()) {
-        builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor));
-      }
-    }
-
-    if (status.getMaster() != null) {
-      builder.setMaster(ProtobufUtil.toServerName(status.getMaster()));
-    }
-
-    if (status.getBackupMasters() != null) {
-      for (ServerName backup : status.getBackupMasters()) {
-        builder.addBackupMasters(ProtobufUtil.toServerName(backup));
-      }
-    }
-
-    if (status.getBalancerOn() != null) {
-      builder.setBalancerOn(status.getBalancerOn());
-    }
-
-    builder.setMasterInfoPort(status.getMasterInfoPort());
-    return builder.build();
+  public static ClusterStatus toClusterStatus(ClusterStatusProtos.ClusterStatus proto) {
+    return new ClusterStatus(ClusterMetricsBuilder.toClusterMetrics(proto));
   }
 
   public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) {
@@ -3459,4 +3238,23 @@ public final class ProtobufUtil {
         .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize())
         .build();
   }
+
+  public static ClusterStatusProtos.ReplicationLoadSource toReplicationLoadSource(
+      ReplicationLoadSource rls) {
+    return ClusterStatusProtos.ReplicationLoadSource.newBuilder()
+        .setPeerID(rls.getPeerID())
+        .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp())
+        .setSizeOfLogQueue((int) rls.getSizeOfLogQueue())
+        .setTimeStampOfLastShippedOp(rls.getTimeStampOfLastShippedOp())
+        .setReplicationLag(rls.getReplicationLag())
+        .build();
+  }
+
+  public static ClusterStatusProtos.ReplicationLoadSink toReplicationLoadSink(
+      ReplicationLoadSink rls) {
+    return ClusterStatusProtos.ReplicationLoadSink.newBuilder()
+        .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp())
+        .setTimeStampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp())
+        .build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 5cbd2bf..7e8e903 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -27,7 +27,8 @@ import java.util.Set;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.ClusterMetricsBuilder;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -1523,7 +1524,7 @@ public final class RequestConverter {
    */
   public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet<Option> options) {
     return GetClusterStatusRequest.newBuilder()
-                                  .addAllOptions(ProtobufUtil.toOptions(options))
+                                  .addAllOptions(ClusterMetricsBuilder.toOptions(options))
                                   .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
index 47fcd84..299e51b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
@@ -22,12 +22,11 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
-
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index 2588e63..6c2e635 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTestBase;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 826db07..e1e73ff 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 90ebccb..7ee1065 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.EnumSet;
-
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
@@ -29,16 +28,15 @@ import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
-
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class StorageClusterStatusResource extends ResourceBase {

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index 3d70410..3ac6566 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -21,20 +21,18 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.EnumSet;
-
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
 import javax.ws.rs.core.Response.ResponseBuilder;
-
+import javax.ws.rs.core.UriInfo;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
-import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
 
 @InterfaceAudience.Private
 public class StorageClusterVersionResource extends ResourceBase {

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index cd8c386..e4386bc 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -33,9 +33,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseCluster;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -59,6 +58,7 @@ import org.junit.Test;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -525,7 +525,7 @@ public abstract class TestRSGroupsBase {
             getTableRegionMap().get(tableName) != null &&
                 getTableRegionMap().get(tableName).size() == 6 &&
                 admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
-                     .getRegionsInTransition().size() < 1;
+                     .getRegionStatesInTransition().size() < 1;
       }
     });
 
@@ -609,7 +609,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
+        return cluster.getClusterStatus().getRegionStatesInTransition().isEmpty();
       }
     });
     Set<Address> newServers = Sets.newHashSet();
@@ -626,7 +626,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
+        return cluster.getClusterStatus().getRegionStatesInTransition().isEmpty();
       }
     });
 
@@ -830,7 +830,7 @@ public abstract class TestRSGroupsBase {
                 getTableRegionMap().get(tableName).size() == 5 &&
                 getTableServerRegionMap().get(tableName).size() == 1 &&
                 admin.getClusterStatus(EnumSet.of(Option.REGIONS_IN_TRANSITION))
-                     .getRegionsInTransition().size() < 1;
+                     .getRegionStatesInTransition().size() < 1;
       }
     });
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/448ba3a7/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
index ee5b33f..e1f864e 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
@@ -22,7 +22,7 @@ HMaster master;
 <%import>
 java.util.*;
 org.apache.hadoop.hbase.ServerName;
-org.apache.hadoop.hbase.ClusterStatus;
+org.apache.hadoop.hbase.ClusterMetrics;
 org.apache.hadoop.hbase.master.HMaster;
 org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 </%import>
@@ -55,7 +55,7 @@ MasterAddressTracker masterAddressTracker = master.getMasterAddressTracker();
     </tr>
     <%java>
     Collection<ServerName> backup_masters = master.getClusterStatusWithoutCoprocessor(
-        EnumSet.of(ClusterStatus.Option.BACKUP_MASTERS)).getBackupMasters();
+        EnumSet.of(ClusterMetrics.Option.BACKUP_MASTERS)).getBackupMasters();
     ServerName [] backupServerNames = backup_masters.toArray(new ServerName[backup_masters.size()]);
     Arrays.sort(backupServerNames);
     for (ServerName serverName : backupServerNames) {


Mime
View raw message