hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ecl...@apache.org
Subject [29/38] HBASE-12197 Move rest to its own module
Date Fri, 10 Oct 2014 16:53:10 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/876617bd/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
new file mode 100644
index 0000000..05ff7a3
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
@@ -0,0 +1,3955 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: StorageClusterStatusMessage.proto
+
+package org.apache.hadoop.hbase.rest.protobuf.generated;
+
+public final class StorageClusterStatusMessage {
+  private StorageClusterStatusMessage() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface StorageClusterStatusOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+    /**
+     * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+     *
+     * <pre>
+     * node status
+     * </pre>
+     */
+    java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> 
+        getLiveNodesList();
+    /**
+     * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+     *
+     * <pre>
+     * node status
+     * </pre>
+     */
+    org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index);
+    /**
+     * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+     *
+     * <pre>
+     * node status
+     * </pre>
+     */
+    int getLiveNodesCount();
+    /**
+     * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+     *
+     * <pre>
+     * node status
+     * </pre>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder> 
+        getLiveNodesOrBuilderList();
+    /**
+     * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;</code>
+     *
+     * <pre>
+     * node status
+     * </pre>
+     */
+    org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder getLiveNodesOrBuilder(
+        int index);
+
+    // repeated string deadNodes = 2;
+    /**
+     * <code>repeated string deadNodes = 2;</code>
+     */
+    java.util.List<java.lang.String>
+    getDeadNodesList();
+    /**
+     * <code>repeated string deadNodes = 2;</code>
+     */
+    int getDeadNodesCount();
+    /**
+     * <code>repeated string deadNodes = 2;</code>
+     */
+    java.lang.String getDeadNodes(int index);
+    /**
+     * <code>repeated string deadNodes = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getDeadNodesBytes(int index);
+
+    // optional int32 regions = 3;
+    /**
+     * <code>optional int32 regions = 3;</code>
+     *
+     * <pre>
+     * summary statistics
+     * </pre>
+     */
+    boolean hasRegions();
+    /**
+     * <code>optional int32 regions = 3;</code>
+     *
+     * <pre>
+     * summary statistics
+     * </pre>
+     */
+    int getRegions();
+
+    // optional int32 requests = 4;
+    /**
+     * <code>optional int32 requests = 4;</code>
+     */
+    boolean hasRequests();
+    /**
+     * <code>optional int32 requests = 4;</code>
+     */
+    int getRequests();
+
+    // optional double averageLoad = 5;
+    /**
+     * <code>optional double averageLoad = 5;</code>
+     */
+    boolean hasAverageLoad();
+    /**
+     * <code>optional double averageLoad = 5;</code>
+     */
+    double getAverageLoad();
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus}
+   */
+  public static final class StorageClusterStatus extends
+      com.google.protobuf.GeneratedMessage
+      implements StorageClusterStatusOrBuilder {
+    // Use StorageClusterStatus.newBuilder() to construct.
+    private StorageClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private StorageClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final StorageClusterStatus defaultInstance;
+    public static StorageClusterStatus getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StorageClusterStatus getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private StorageClusterStatus(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              liveNodes_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.PARSER, extensionRegistry));
+              break;
+            }
+            case 18: {
+              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+                deadNodes_ = new com.google.protobuf.LazyStringArrayList();
+                mutable_bitField0_ |= 0x00000002;
+              }
+              deadNodes_.add(input.readBytes());
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000001;
+              regions_ = input.readInt32();
+              break;
+            }
+            case 32: {
+              bitField0_ |= 0x00000002;
+              requests_ = input.readInt32();
+              break;
+            }
+            case 41: {
+              bitField0_ |= 0x00000004;
+              averageLoad_ = input.readDouble();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          liveNodes_ = java.util.Collections.unmodifiableList(liveNodes_);
+        }
+        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+          deadNodes_ = new com.google.protobuf.UnmodifiableLazyStringList(deadNodes_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<StorageClusterStatus> PARSER =
+        new com.google.protobuf.AbstractParser<StorageClusterStatus>() {
+      public StorageClusterStatus parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new StorageClusterStatus(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<StorageClusterStatus> getParserForType() {
+      return PARSER;
+    }
+
+    public interface RegionOrBuilder
+        extends com.google.protobuf.MessageOrBuilder {
+
+      // required bytes name = 1;
+      /**
+       * <code>required bytes name = 1;</code>
+       */
+      boolean hasName();
+      /**
+       * <code>required bytes name = 1;</code>
+       */
+      com.google.protobuf.ByteString getName();
+
+      // optional int32 stores = 2;
+      /**
+       * <code>optional int32 stores = 2;</code>
+       */
+      boolean hasStores();
+      /**
+       * <code>optional int32 stores = 2;</code>
+       */
+      int getStores();
+
+      // optional int32 storefiles = 3;
+      /**
+       * <code>optional int32 storefiles = 3;</code>
+       */
+      boolean hasStorefiles();
+      /**
+       * <code>optional int32 storefiles = 3;</code>
+       */
+      int getStorefiles();
+
+      // optional int32 storefileSizeMB = 4;
+      /**
+       * <code>optional int32 storefileSizeMB = 4;</code>
+       */
+      boolean hasStorefileSizeMB();
+      /**
+       * <code>optional int32 storefileSizeMB = 4;</code>
+       */
+      int getStorefileSizeMB();
+
+      // optional int32 memstoreSizeMB = 5;
+      /**
+       * <code>optional int32 memstoreSizeMB = 5;</code>
+       */
+      boolean hasMemstoreSizeMB();
+      /**
+       * <code>optional int32 memstoreSizeMB = 5;</code>
+       */
+      int getMemstoreSizeMB();
+
+      // optional int32 storefileIndexSizeMB = 6;
+      /**
+       * <code>optional int32 storefileIndexSizeMB = 6;</code>
+       */
+      boolean hasStorefileIndexSizeMB();
+      /**
+       * <code>optional int32 storefileIndexSizeMB = 6;</code>
+       */
+      int getStorefileIndexSizeMB();
+
+      // optional int64 readRequestsCount = 7;
+      /**
+       * <code>optional int64 readRequestsCount = 7;</code>
+       */
+      boolean hasReadRequestsCount();
+      /**
+       * <code>optional int64 readRequestsCount = 7;</code>
+       */
+      long getReadRequestsCount();
+
+      // optional int64 writeRequestsCount = 8;
+      /**
+       * <code>optional int64 writeRequestsCount = 8;</code>
+       */
+      boolean hasWriteRequestsCount();
+      /**
+       * <code>optional int64 writeRequestsCount = 8;</code>
+       */
+      long getWriteRequestsCount();
+
+      // optional int32 rootIndexSizeKB = 9;
+      /**
+       * <code>optional int32 rootIndexSizeKB = 9;</code>
+       */
+      boolean hasRootIndexSizeKB();
+      /**
+       * <code>optional int32 rootIndexSizeKB = 9;</code>
+       */
+      int getRootIndexSizeKB();
+
+      // optional int32 totalStaticIndexSizeKB = 10;
+      /**
+       * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+       */
+      boolean hasTotalStaticIndexSizeKB();
+      /**
+       * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+       */
+      int getTotalStaticIndexSizeKB();
+
+      // optional int32 totalStaticBloomSizeKB = 11;
+      /**
+       * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+       */
+      boolean hasTotalStaticBloomSizeKB();
+      /**
+       * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+       */
+      int getTotalStaticBloomSizeKB();
+
+      // optional int64 totalCompactingKVs = 12;
+      /**
+       * <code>optional int64 totalCompactingKVs = 12;</code>
+       */
+      boolean hasTotalCompactingKVs();
+      /**
+       * <code>optional int64 totalCompactingKVs = 12;</code>
+       */
+      long getTotalCompactingKVs();
+
+      // optional int64 currentCompactedKVs = 13;
+      /**
+       * <code>optional int64 currentCompactedKVs = 13;</code>
+       */
+      boolean hasCurrentCompactedKVs();
+      /**
+       * <code>optional int64 currentCompactedKVs = 13;</code>
+       */
+      long getCurrentCompactedKVs();
+    }
+    /**
+     * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
+     */
+    public static final class Region extends
+        com.google.protobuf.GeneratedMessage
+        implements RegionOrBuilder {
+      // Use Region.newBuilder() to construct.
+      private Region(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+        super(builder);
+        this.unknownFields = builder.getUnknownFields();
+      }
+      private Region(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+      private static final Region defaultInstance;
+      public static Region getDefaultInstance() {
+        return defaultInstance;
+      }
+
+      public Region getDefaultInstanceForType() {
+        return defaultInstance;
+      }
+
+      private final com.google.protobuf.UnknownFieldSet unknownFields;
+      @java.lang.Override
+      public final com.google.protobuf.UnknownFieldSet
+          getUnknownFields() {
+        return this.unknownFields;
+      }
+      private Region(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        initFields();
+        int mutable_bitField0_ = 0;
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+            com.google.protobuf.UnknownFieldSet.newBuilder();
+        try {
+          boolean done = false;
+          while (!done) {
+            int tag = input.readTag();
+            switch (tag) {
+              case 0:
+                done = true;
+                break;
+              default: {
+                if (!parseUnknownField(input, unknownFields,
+                                       extensionRegistry, tag)) {
+                  done = true;
+                }
+                break;
+              }
+              case 10: {
+                bitField0_ |= 0x00000001;
+                name_ = input.readBytes();
+                break;
+              }
+              case 16: {
+                bitField0_ |= 0x00000002;
+                stores_ = input.readInt32();
+                break;
+              }
+              case 24: {
+                bitField0_ |= 0x00000004;
+                storefiles_ = input.readInt32();
+                break;
+              }
+              case 32: {
+                bitField0_ |= 0x00000008;
+                storefileSizeMB_ = input.readInt32();
+                break;
+              }
+              case 40: {
+                bitField0_ |= 0x00000010;
+                memstoreSizeMB_ = input.readInt32();
+                break;
+              }
+              case 48: {
+                bitField0_ |= 0x00000020;
+                storefileIndexSizeMB_ = input.readInt32();
+                break;
+              }
+              case 56: {
+                bitField0_ |= 0x00000040;
+                readRequestsCount_ = input.readInt64();
+                break;
+              }
+              case 64: {
+                bitField0_ |= 0x00000080;
+                writeRequestsCount_ = input.readInt64();
+                break;
+              }
+              case 72: {
+                bitField0_ |= 0x00000100;
+                rootIndexSizeKB_ = input.readInt32();
+                break;
+              }
+              case 80: {
+                bitField0_ |= 0x00000200;
+                totalStaticIndexSizeKB_ = input.readInt32();
+                break;
+              }
+              case 88: {
+                bitField0_ |= 0x00000400;
+                totalStaticBloomSizeKB_ = input.readInt32();
+                break;
+              }
+              case 96: {
+                bitField0_ |= 0x00000800;
+                totalCompactingKVs_ = input.readInt64();
+                break;
+              }
+              case 104: {
+                bitField0_ |= 0x00001000;
+                currentCompactedKVs_ = input.readInt64();
+                break;
+              }
+            }
+          }
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          throw e.setUnfinishedMessage(this);
+        } catch (java.io.IOException e) {
+          throw new com.google.protobuf.InvalidProtocolBufferException(
+              e.getMessage()).setUnfinishedMessage(this);
+        } finally {
+          this.unknownFields = unknownFields.build();
+          makeExtensionsImmutable();
+        }
+      }
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+      }
+
+      public static com.google.protobuf.Parser<Region> PARSER =
+          new com.google.protobuf.AbstractParser<Region>() {
+        public Region parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          return new Region(input, extensionRegistry);
+        }
+      };
+
+      @java.lang.Override
+      public com.google.protobuf.Parser<Region> getParserForType() {
+        return PARSER;
+      }
+
+      private int bitField0_;
+      // required bytes name = 1;
+      public static final int NAME_FIELD_NUMBER = 1;
+      private com.google.protobuf.ByteString name_;
+      /**
+       * <code>required bytes name = 1;</code>
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bytes name = 1;</code>
+       */
+      public com.google.protobuf.ByteString getName() {
+        return name_;
+      }
+
+      // optional int32 stores = 2;
+      public static final int STORES_FIELD_NUMBER = 2;
+      private int stores_;
+      /**
+       * <code>optional int32 stores = 2;</code>
+       */
+      public boolean hasStores() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional int32 stores = 2;</code>
+       */
+      public int getStores() {
+        return stores_;
+      }
+
+      // optional int32 storefiles = 3;
+      public static final int STOREFILES_FIELD_NUMBER = 3;
+      private int storefiles_;
+      /**
+       * <code>optional int32 storefiles = 3;</code>
+       */
+      public boolean hasStorefiles() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional int32 storefiles = 3;</code>
+       */
+      public int getStorefiles() {
+        return storefiles_;
+      }
+
+      // optional int32 storefileSizeMB = 4;
+      public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
+      private int storefileSizeMB_;
+      /**
+       * <code>optional int32 storefileSizeMB = 4;</code>
+       */
+      public boolean hasStorefileSizeMB() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional int32 storefileSizeMB = 4;</code>
+       */
+      public int getStorefileSizeMB() {
+        return storefileSizeMB_;
+      }
+
+      // optional int32 memstoreSizeMB = 5;
+      public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
+      private int memstoreSizeMB_;
+      /**
+       * <code>optional int32 memstoreSizeMB = 5;</code>
+       */
+      public boolean hasMemstoreSizeMB() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional int32 memstoreSizeMB = 5;</code>
+       */
+      public int getMemstoreSizeMB() {
+        return memstoreSizeMB_;
+      }
+
+      // optional int32 storefileIndexSizeMB = 6;
+      public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
+      private int storefileIndexSizeMB_;
+      /**
+       * <code>optional int32 storefileIndexSizeMB = 6;</code>
+       */
+      public boolean hasStorefileIndexSizeMB() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * <code>optional int32 storefileIndexSizeMB = 6;</code>
+       */
+      public int getStorefileIndexSizeMB() {
+        return storefileIndexSizeMB_;
+      }
+
+      // optional int64 readRequestsCount = 7;
+      public static final int READREQUESTSCOUNT_FIELD_NUMBER = 7;
+      private long readRequestsCount_;
+      /**
+       * <code>optional int64 readRequestsCount = 7;</code>
+       */
+      public boolean hasReadRequestsCount() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      /**
+       * <code>optional int64 readRequestsCount = 7;</code>
+       */
+      public long getReadRequestsCount() {
+        return readRequestsCount_;
+      }
+
+      // optional int64 writeRequestsCount = 8;
+      public static final int WRITEREQUESTSCOUNT_FIELD_NUMBER = 8;
+      private long writeRequestsCount_;
+      /**
+       * <code>optional int64 writeRequestsCount = 8;</code>
+       */
+      public boolean hasWriteRequestsCount() {
+        return ((bitField0_ & 0x00000080) == 0x00000080);
+      }
+      /**
+       * <code>optional int64 writeRequestsCount = 8;</code>
+       */
+      public long getWriteRequestsCount() {
+        return writeRequestsCount_;
+      }
+
+      // optional int32 rootIndexSizeKB = 9;
+      public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 9;
+      private int rootIndexSizeKB_;
+      /**
+       * <code>optional int32 rootIndexSizeKB = 9;</code>
+       */
+      public boolean hasRootIndexSizeKB() {
+        return ((bitField0_ & 0x00000100) == 0x00000100);
+      }
+      /**
+       * <code>optional int32 rootIndexSizeKB = 9;</code>
+       */
+      public int getRootIndexSizeKB() {
+        return rootIndexSizeKB_;
+      }
+
+      // optional int32 totalStaticIndexSizeKB = 10;
+      public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 10;
+      private int totalStaticIndexSizeKB_;
+      /**
+       * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+       */
+      public boolean hasTotalStaticIndexSizeKB() {
+        return ((bitField0_ & 0x00000200) == 0x00000200);
+      }
+      /**
+       * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+       */
+      public int getTotalStaticIndexSizeKB() {
+        return totalStaticIndexSizeKB_;
+      }
+
+      // optional int32 totalStaticBloomSizeKB = 11;
+      public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 11;
+      private int totalStaticBloomSizeKB_;
+      /**
+       * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+       */
+      public boolean hasTotalStaticBloomSizeKB() {
+        return ((bitField0_ & 0x00000400) == 0x00000400);
+      }
+      /**
+       * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+       */
+      public int getTotalStaticBloomSizeKB() {
+        return totalStaticBloomSizeKB_;
+      }
+
+      // optional int64 totalCompactingKVs = 12;
+      public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 12;
+      private long totalCompactingKVs_;
+      /**
+       * <code>optional int64 totalCompactingKVs = 12;</code>
+       */
+      public boolean hasTotalCompactingKVs() {
+        return ((bitField0_ & 0x00000800) == 0x00000800);
+      }
+      /**
+       * <code>optional int64 totalCompactingKVs = 12;</code>
+       */
+      public long getTotalCompactingKVs() {
+        return totalCompactingKVs_;
+      }
+
+      // optional int64 currentCompactedKVs = 13;
+      public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 13;
+      private long currentCompactedKVs_;
+      /**
+       * <code>optional int64 currentCompactedKVs = 13;</code>
+       */
+      public boolean hasCurrentCompactedKVs() {
+        return ((bitField0_ & 0x00001000) == 0x00001000);
+      }
+      /**
+       * <code>optional int64 currentCompactedKVs = 13;</code>
+       */
+      public long getCurrentCompactedKVs() {
+        return currentCompactedKVs_;
+      }
+
+      private void initFields() {
+        name_ = com.google.protobuf.ByteString.EMPTY;
+        stores_ = 0;
+        storefiles_ = 0;
+        storefileSizeMB_ = 0;
+        memstoreSizeMB_ = 0;
+        storefileIndexSizeMB_ = 0;
+        readRequestsCount_ = 0L;
+        writeRequestsCount_ = 0L;
+        rootIndexSizeKB_ = 0;
+        totalStaticIndexSizeKB_ = 0;
+        totalStaticBloomSizeKB_ = 0;
+        totalCompactingKVs_ = 0L;
+        currentCompactedKVs_ = 0L;
+      }
+      private byte memoizedIsInitialized = -1;
+      public final boolean isInitialized() {
+        byte isInitialized = memoizedIsInitialized;
+        if (isInitialized != -1) return isInitialized == 1;
+
+        if (!hasName()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+        memoizedIsInitialized = 1;
+        return true;
+      }
+
+      public void writeTo(com.google.protobuf.CodedOutputStream output)
+                          throws java.io.IOException {
+        getSerializedSize();
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          output.writeBytes(1, name_);
+        }
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          output.writeInt32(2, stores_);
+        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          output.writeInt32(3, storefiles_);
+        }
+        if (((bitField0_ & 0x00000008) == 0x00000008)) {
+          output.writeInt32(4, storefileSizeMB_);
+        }
+        if (((bitField0_ & 0x00000010) == 0x00000010)) {
+          output.writeInt32(5, memstoreSizeMB_);
+        }
+        if (((bitField0_ & 0x00000020) == 0x00000020)) {
+          output.writeInt32(6, storefileIndexSizeMB_);
+        }
+        if (((bitField0_ & 0x00000040) == 0x00000040)) {
+          output.writeInt64(7, readRequestsCount_);
+        }
+        if (((bitField0_ & 0x00000080) == 0x00000080)) {
+          output.writeInt64(8, writeRequestsCount_);
+        }
+        if (((bitField0_ & 0x00000100) == 0x00000100)) {
+          output.writeInt32(9, rootIndexSizeKB_);
+        }
+        if (((bitField0_ & 0x00000200) == 0x00000200)) {
+          output.writeInt32(10, totalStaticIndexSizeKB_);
+        }
+        if (((bitField0_ & 0x00000400) == 0x00000400)) {
+          output.writeInt32(11, totalStaticBloomSizeKB_);
+        }
+        if (((bitField0_ & 0x00000800) == 0x00000800)) {
+          output.writeInt64(12, totalCompactingKVs_);
+        }
+        if (((bitField0_ & 0x00001000) == 0x00001000)) {
+          output.writeInt64(13, currentCompactedKVs_);
+        }
+        getUnknownFields().writeTo(output);
+      }
+
+      private int memoizedSerializedSize = -1;
+      public int getSerializedSize() {
+        int size = memoizedSerializedSize;
+        if (size != -1) return size;
+
+        size = 0;
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeBytesSize(1, name_);
+        }
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(2, stores_);
+        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(3, storefiles_);
+        }
+        if (((bitField0_ & 0x00000008) == 0x00000008)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(4, storefileSizeMB_);
+        }
+        if (((bitField0_ & 0x00000010) == 0x00000010)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(5, memstoreSizeMB_);
+        }
+        if (((bitField0_ & 0x00000020) == 0x00000020)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(6, storefileIndexSizeMB_);
+        }
+        if (((bitField0_ & 0x00000040) == 0x00000040)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt64Size(7, readRequestsCount_);
+        }
+        if (((bitField0_ & 0x00000080) == 0x00000080)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt64Size(8, writeRequestsCount_);
+        }
+        if (((bitField0_ & 0x00000100) == 0x00000100)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(9, rootIndexSizeKB_);
+        }
+        if (((bitField0_ & 0x00000200) == 0x00000200)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(10, totalStaticIndexSizeKB_);
+        }
+        if (((bitField0_ & 0x00000400) == 0x00000400)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(11, totalStaticBloomSizeKB_);
+        }
+        if (((bitField0_ & 0x00000800) == 0x00000800)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt64Size(12, totalCompactingKVs_);
+        }
+        if (((bitField0_ & 0x00001000) == 0x00001000)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt64Size(13, currentCompactedKVs_);
+        }
+        size += getUnknownFields().getSerializedSize();
+        memoizedSerializedSize = size;
+        return size;
+      }
+
+      private static final long serialVersionUID = 0L;
+      @java.lang.Override
+      protected java.lang.Object writeReplace()
+          throws java.io.ObjectStreamException {
+        return super.writeReplace();
+      }
+
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.ByteString data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.ByteString data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          byte[] data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return PARSER.parseDelimitedFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseDelimitedFrom(input, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.CodedInputStream input)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input, extensionRegistry);
+      }
+
+      public static Builder newBuilder() { return Builder.create(); }
+      public Builder newBuilderForType() { return newBuilder(); }
+      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
+        return newBuilder().mergeFrom(prototype);
+      }
+      public Builder toBuilder() { return newBuilder(this); }
+
+      @java.lang.Override
+      protected Builder newBuilderForType(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        Builder builder = new Builder(parent);
+        return builder;
+      }
+      /**
+       * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region}
+       */
+      public static final class Builder extends
+          com.google.protobuf.GeneratedMessage.Builder<Builder>
+         implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder {
+        public static final com.google.protobuf.Descriptors.Descriptor
+            getDescriptor() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+        }
+
+        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+            internalGetFieldAccessorTable() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable
+              .ensureFieldAccessorsInitialized(
+                  org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+        }
+
+        // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
+        private Builder() {
+          maybeForceBuilderInitialization();
+        }
+
+        private Builder(
+            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+          super(parent);
+          maybeForceBuilderInitialization();
+        }
+        private void maybeForceBuilderInitialization() {
+          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          }
+        }
+        private static Builder create() {
+          return new Builder();
+        }
+
+        public Builder clear() {
+          super.clear();
+          name_ = com.google.protobuf.ByteString.EMPTY;
+          bitField0_ = (bitField0_ & ~0x00000001);
+          stores_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000002);
+          storefiles_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000004);
+          storefileSizeMB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000008);
+          memstoreSizeMB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000010);
+          storefileIndexSizeMB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000020);
+          readRequestsCount_ = 0L;
+          bitField0_ = (bitField0_ & ~0x00000040);
+          writeRequestsCount_ = 0L;
+          bitField0_ = (bitField0_ & ~0x00000080);
+          rootIndexSizeKB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000100);
+          totalStaticIndexSizeKB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000200);
+          totalStaticBloomSizeKB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000400);
+          totalCompactingKVs_ = 0L;
+          bitField0_ = (bitField0_ & ~0x00000800);
+          currentCompactedKVs_ = 0L;
+          bitField0_ = (bitField0_ & ~0x00001000);
+          return this;
+        }
+
+        public Builder clone() {
+          return create().mergeFrom(buildPartial());
+        }
+
+        public com.google.protobuf.Descriptors.Descriptor
+            getDescriptorForType() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
+        }
+
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
+        }
+
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
+          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = buildPartial();
+          if (!result.isInitialized()) {
+            throw newUninitializedMessageException(result);
+          }
+          return result;
+        }
+
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
+          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region(this);
+          int from_bitField0_ = bitField0_;
+          int to_bitField0_ = 0;
+          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+            to_bitField0_ |= 0x00000001;
+          }
+          result.name_ = name_;
+          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+            to_bitField0_ |= 0x00000002;
+          }
+          result.stores_ = stores_;
+          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+            to_bitField0_ |= 0x00000004;
+          }
+          result.storefiles_ = storefiles_;
+          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+            to_bitField0_ |= 0x00000008;
+          }
+          result.storefileSizeMB_ = storefileSizeMB_;
+          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+            to_bitField0_ |= 0x00000010;
+          }
+          result.memstoreSizeMB_ = memstoreSizeMB_;
+          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+            to_bitField0_ |= 0x00000020;
+          }
+          result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
+          if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+            to_bitField0_ |= 0x00000040;
+          }
+          result.readRequestsCount_ = readRequestsCount_;
+          if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+            to_bitField0_ |= 0x00000080;
+          }
+          result.writeRequestsCount_ = writeRequestsCount_;
+          if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+            to_bitField0_ |= 0x00000100;
+          }
+          result.rootIndexSizeKB_ = rootIndexSizeKB_;
+          if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+            to_bitField0_ |= 0x00000200;
+          }
+          result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
+          if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+            to_bitField0_ |= 0x00000400;
+          }
+          result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
+          if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+            to_bitField0_ |= 0x00000800;
+          }
+          result.totalCompactingKVs_ = totalCompactingKVs_;
+          if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+            to_bitField0_ |= 0x00001000;
+          }
+          result.currentCompactedKVs_ = currentCompactedKVs_;
+          result.bitField0_ = to_bitField0_;
+          onBuilt();
+          return result;
+        }
+
+        public Builder mergeFrom(com.google.protobuf.Message other) {
+          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
+            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
+          } else {
+            super.mergeFrom(other);
+            return this;
+          }
+        }
+
+        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
+          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
+          if (other.hasName()) {
+            setName(other.getName());
+          }
+          if (other.hasStores()) {
+            setStores(other.getStores());
+          }
+          if (other.hasStorefiles()) {
+            setStorefiles(other.getStorefiles());
+          }
+          if (other.hasStorefileSizeMB()) {
+            setStorefileSizeMB(other.getStorefileSizeMB());
+          }
+          if (other.hasMemstoreSizeMB()) {
+            setMemstoreSizeMB(other.getMemstoreSizeMB());
+          }
+          if (other.hasStorefileIndexSizeMB()) {
+            setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
+          }
+          if (other.hasReadRequestsCount()) {
+            setReadRequestsCount(other.getReadRequestsCount());
+          }
+          if (other.hasWriteRequestsCount()) {
+            setWriteRequestsCount(other.getWriteRequestsCount());
+          }
+          if (other.hasRootIndexSizeKB()) {
+            setRootIndexSizeKB(other.getRootIndexSizeKB());
+          }
+          if (other.hasTotalStaticIndexSizeKB()) {
+            setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
+          }
+          if (other.hasTotalStaticBloomSizeKB()) {
+            setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
+          }
+          if (other.hasTotalCompactingKVs()) {
+            setTotalCompactingKVs(other.getTotalCompactingKVs());
+          }
+          if (other.hasCurrentCompactedKVs()) {
+            setCurrentCompactedKVs(other.getCurrentCompactedKVs());
+          }
+          this.mergeUnknownFields(other.getUnknownFields());
+          return this;
+        }
+
+        public final boolean isInitialized() {
+          if (!hasName()) {
+            
+            return false;
+          }
+          return true;
+        }
+
+        public Builder mergeFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws java.io.IOException {
+          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parsedMessage = null;
+          try {
+            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+            parsedMessage = (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) e.getUnfinishedMessage();
+            throw e;
+          } finally {
+            if (parsedMessage != null) {
+              mergeFrom(parsedMessage);
+            }
+          }
+          return this;
+        }
+        private int bitField0_;
+
+        // required bytes name = 1;
+        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+        /**
+         * <code>required bytes name = 1;</code>
+         */
+        public boolean hasName() {
+          return ((bitField0_ & 0x00000001) == 0x00000001);
+        }
+        /**
+         * <code>required bytes name = 1;</code>
+         */
+        public com.google.protobuf.ByteString getName() {
+          return name_;
+        }
+        /**
+         * <code>required bytes name = 1;</code>
+         */
+        public Builder setName(com.google.protobuf.ByteString value) {
+          if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+          name_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>required bytes name = 1;</code>
+         */
+        public Builder clearName() {
+          bitField0_ = (bitField0_ & ~0x00000001);
+          name_ = getDefaultInstance().getName();
+          onChanged();
+          return this;
+        }
+
+        // optional int32 stores = 2;
+        private int stores_ ;
+        /**
+         * <code>optional int32 stores = 2;</code>
+         */
+        public boolean hasStores() {
+          return ((bitField0_ & 0x00000002) == 0x00000002);
+        }
+        /**
+         * <code>optional int32 stores = 2;</code>
+         */
+        public int getStores() {
+          return stores_;
+        }
+        /**
+         * <code>optional int32 stores = 2;</code>
+         */
+        public Builder setStores(int value) {
+          bitField0_ |= 0x00000002;
+          stores_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 stores = 2;</code>
+         */
+        public Builder clearStores() {
+          bitField0_ = (bitField0_ & ~0x00000002);
+          stores_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 storefiles = 3;
+        private int storefiles_ ;
+        /**
+         * <code>optional int32 storefiles = 3;</code>
+         */
+        public boolean hasStorefiles() {
+          return ((bitField0_ & 0x00000004) == 0x00000004);
+        }
+        /**
+         * <code>optional int32 storefiles = 3;</code>
+         */
+        public int getStorefiles() {
+          return storefiles_;
+        }
+        /**
+         * <code>optional int32 storefiles = 3;</code>
+         */
+        public Builder setStorefiles(int value) {
+          bitField0_ |= 0x00000004;
+          storefiles_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 storefiles = 3;</code>
+         */
+        public Builder clearStorefiles() {
+          bitField0_ = (bitField0_ & ~0x00000004);
+          storefiles_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 storefileSizeMB = 4;
+        private int storefileSizeMB_ ;
+        /**
+         * <code>optional int32 storefileSizeMB = 4;</code>
+         */
+        public boolean hasStorefileSizeMB() {
+          return ((bitField0_ & 0x00000008) == 0x00000008);
+        }
+        /**
+         * <code>optional int32 storefileSizeMB = 4;</code>
+         */
+        public int getStorefileSizeMB() {
+          return storefileSizeMB_;
+        }
+        /**
+         * <code>optional int32 storefileSizeMB = 4;</code>
+         */
+        public Builder setStorefileSizeMB(int value) {
+          bitField0_ |= 0x00000008;
+          storefileSizeMB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 storefileSizeMB = 4;</code>
+         */
+        public Builder clearStorefileSizeMB() {
+          bitField0_ = (bitField0_ & ~0x00000008);
+          storefileSizeMB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 memstoreSizeMB = 5;
+        private int memstoreSizeMB_ ;
+        /**
+         * <code>optional int32 memstoreSizeMB = 5;</code>
+         */
+        public boolean hasMemstoreSizeMB() {
+          return ((bitField0_ & 0x00000010) == 0x00000010);
+        }
+        /**
+         * <code>optional int32 memstoreSizeMB = 5;</code>
+         */
+        public int getMemstoreSizeMB() {
+          return memstoreSizeMB_;
+        }
+        /**
+         * <code>optional int32 memstoreSizeMB = 5;</code>
+         */
+        public Builder setMemstoreSizeMB(int value) {
+          bitField0_ |= 0x00000010;
+          memstoreSizeMB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 memstoreSizeMB = 5;</code>
+         */
+        public Builder clearMemstoreSizeMB() {
+          bitField0_ = (bitField0_ & ~0x00000010);
+          memstoreSizeMB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 storefileIndexSizeMB = 6;
+        private int storefileIndexSizeMB_ ;
+        /**
+         * <code>optional int32 storefileIndexSizeMB = 6;</code>
+         */
+        public boolean hasStorefileIndexSizeMB() {
+          return ((bitField0_ & 0x00000020) == 0x00000020);
+        }
+        /**
+         * <code>optional int32 storefileIndexSizeMB = 6;</code>
+         */
+        public int getStorefileIndexSizeMB() {
+          return storefileIndexSizeMB_;
+        }
+        /**
+         * <code>optional int32 storefileIndexSizeMB = 6;</code>
+         */
+        public Builder setStorefileIndexSizeMB(int value) {
+          bitField0_ |= 0x00000020;
+          storefileIndexSizeMB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 storefileIndexSizeMB = 6;</code>
+         */
+        public Builder clearStorefileIndexSizeMB() {
+          bitField0_ = (bitField0_ & ~0x00000020);
+          storefileIndexSizeMB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int64 readRequestsCount = 7;
+        private long readRequestsCount_ ;
+        /**
+         * <code>optional int64 readRequestsCount = 7;</code>
+         */
+        public boolean hasReadRequestsCount() {
+          return ((bitField0_ & 0x00000040) == 0x00000040);
+        }
+        /**
+         * <code>optional int64 readRequestsCount = 7;</code>
+         */
+        public long getReadRequestsCount() {
+          return readRequestsCount_;
+        }
+        /**
+         * <code>optional int64 readRequestsCount = 7;</code>
+         */
+        public Builder setReadRequestsCount(long value) {
+          bitField0_ |= 0x00000040;
+          readRequestsCount_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int64 readRequestsCount = 7;</code>
+         */
+        public Builder clearReadRequestsCount() {
+          bitField0_ = (bitField0_ & ~0x00000040);
+          readRequestsCount_ = 0L;
+          onChanged();
+          return this;
+        }
+
+        // optional int64 writeRequestsCount = 8;
+        private long writeRequestsCount_ ;
+        /**
+         * <code>optional int64 writeRequestsCount = 8;</code>
+         */
+        public boolean hasWriteRequestsCount() {
+          return ((bitField0_ & 0x00000080) == 0x00000080);
+        }
+        /**
+         * <code>optional int64 writeRequestsCount = 8;</code>
+         */
+        public long getWriteRequestsCount() {
+          return writeRequestsCount_;
+        }
+        /**
+         * <code>optional int64 writeRequestsCount = 8;</code>
+         */
+        public Builder setWriteRequestsCount(long value) {
+          bitField0_ |= 0x00000080;
+          writeRequestsCount_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int64 writeRequestsCount = 8;</code>
+         */
+        public Builder clearWriteRequestsCount() {
+          bitField0_ = (bitField0_ & ~0x00000080);
+          writeRequestsCount_ = 0L;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 rootIndexSizeKB = 9;
+        private int rootIndexSizeKB_ ;
+        /**
+         * <code>optional int32 rootIndexSizeKB = 9;</code>
+         */
+        public boolean hasRootIndexSizeKB() {
+          return ((bitField0_ & 0x00000100) == 0x00000100);
+        }
+        /**
+         * <code>optional int32 rootIndexSizeKB = 9;</code>
+         */
+        public int getRootIndexSizeKB() {
+          return rootIndexSizeKB_;
+        }
+        /**
+         * <code>optional int32 rootIndexSizeKB = 9;</code>
+         */
+        public Builder setRootIndexSizeKB(int value) {
+          bitField0_ |= 0x00000100;
+          rootIndexSizeKB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 rootIndexSizeKB = 9;</code>
+         */
+        public Builder clearRootIndexSizeKB() {
+          bitField0_ = (bitField0_ & ~0x00000100);
+          rootIndexSizeKB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 totalStaticIndexSizeKB = 10;
+        private int totalStaticIndexSizeKB_ ;
+        /**
+         * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+         */
+        public boolean hasTotalStaticIndexSizeKB() {
+          return ((bitField0_ & 0x00000200) == 0x00000200);
+        }
+        /**
+         * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+         */
+        public int getTotalStaticIndexSizeKB() {
+          return totalStaticIndexSizeKB_;
+        }
+        /**
+         * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+         */
+        public Builder setTotalStaticIndexSizeKB(int value) {
+          bitField0_ |= 0x00000200;
+          totalStaticIndexSizeKB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 totalStaticIndexSizeKB = 10;</code>
+         */
+        public Builder clearTotalStaticIndexSizeKB() {
+          bitField0_ = (bitField0_ & ~0x00000200);
+          totalStaticIndexSizeKB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int32 totalStaticBloomSizeKB = 11;
+        private int totalStaticBloomSizeKB_ ;
+        /**
+         * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+         */
+        public boolean hasTotalStaticBloomSizeKB() {
+          return ((bitField0_ & 0x00000400) == 0x00000400);
+        }
+        /**
+         * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+         */
+        public int getTotalStaticBloomSizeKB() {
+          return totalStaticBloomSizeKB_;
+        }
+        /**
+         * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+         */
+        public Builder setTotalStaticBloomSizeKB(int value) {
+          bitField0_ |= 0x00000400;
+          totalStaticBloomSizeKB_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int32 totalStaticBloomSizeKB = 11;</code>
+         */
+        public Builder clearTotalStaticBloomSizeKB() {
+          bitField0_ = (bitField0_ & ~0x00000400);
+          totalStaticBloomSizeKB_ = 0;
+          onChanged();
+          return this;
+        }
+
+        // optional int64 totalCompactingKVs = 12;
+        private long totalCompactingKVs_ ;
+        /**
+         * <code>optional int64 totalCompactingKVs = 12;</code>
+         */
+        public boolean hasTotalCompactingKVs() {
+          return ((bitField0_ & 0x00000800) == 0x00000800);
+        }
+        /**
+         * <code>optional int64 totalCompactingKVs = 12;</code>
+         */
+        public long getTotalCompactingKVs() {
+          return totalCompactingKVs_;
+        }
+        /**
+         * <code>optional int64 totalCompactingKVs = 12;</code>
+         */
+        public Builder setTotalCompactingKVs(long value) {
+          bitField0_ |= 0x00000800;
+          totalCompactingKVs_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int64 totalCompactingKVs = 12;</code>
+         */
+        public Builder clearTotalCompactingKVs() {
+          bitField0_ = (bitField0_ & ~0x00000800);
+          totalCompactingKVs_ = 0L;
+          onChanged();
+          return this;
+        }
+
+        // optional int64 currentCompactedKVs = 13;
+        private long currentCompactedKVs_ ;
+        /**
+         * <code>optional int64 currentCompactedKVs = 13;</code>
+         */
+        public boolean hasCurrentCompactedKVs() {
+          return ((bitField0_ & 0x00001000) == 0x00001000);
+        }
+        /**
+         * <code>optional int64 currentCompactedKVs = 13;</code>
+         */
+        public long getCurrentCompactedKVs() {
+          return currentCompactedKVs_;
+        }
+        /**
+         * <code>optional int64 currentCompactedKVs = 13;</code>
+         */
+        public Builder setCurrentCompactedKVs(long value) {
+          bitField0_ |= 0x00001000;
+          currentCompactedKVs_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional int64 currentCompactedKVs = 13;</code>
+         */
+        public Builder clearCurrentCompactedKVs() {
+          bitField0_ = (bitField0_ & ~0x00001000);
+          currentCompactedKVs_ = 0L;
+          onChanged();
+          return this;
+        }
+
+        // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
+      }
+
+      static {
+        defaultInstance = new Region(true);
+        defaultInstance.initFields();
+      }
+
+      // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
+    }
+
+    public interface NodeOrBuilder
+        extends com.google.protobuf.MessageOrBuilder {
+
+      // required string name = 1;
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      boolean hasName();
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      java.lang.String getName();
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      com.google.protobuf.ByteString
+          getNameBytes();
+
+      // optional int64 startCode = 2;
+      /**
+       * <code>optional int64 startCode = 2;</code>
+       */
+      boolean hasStartCode();
+      /**
+       * <code>optional int64 startCode = 2;</code>
+       */
+      long getStartCode();
+
+      // optional int32 requests = 3;
+      /**
+       * <code>optional int32 requests = 3;</code>
+       */
+      boolean hasRequests();
+      /**
+       * <code>optional int32 requests = 3;</code>
+       */
+      int getRequests();
+
+      // optional int32 heapSizeMB = 4;
+      /**
+       * <code>optional int32 heapSizeMB = 4;</code>
+       */
+      boolean hasHeapSizeMB();
+      /**
+       * <code>optional int32 heapSizeMB = 4;</code>
+       */
+      int getHeapSizeMB();
+
+      // optional int32 maxHeapSizeMB = 5;
+      /**
+       * <code>optional int32 maxHeapSizeMB = 5;</code>
+       */
+      boolean hasMaxHeapSizeMB();
+      /**
+       * <code>optional int32 maxHeapSizeMB = 5;</code>
+       */
+      int getMaxHeapSizeMB();
+
+      // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> 
+          getRegionsList();
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index);
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      int getRegionsCount();
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder> 
+          getRegionsOrBuilderList();
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
+          int index);
+    }
+    /**
+     * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
+     */
+    public static final class Node extends
+        com.google.protobuf.GeneratedMessage
+        implements NodeOrBuilder {
+      // Use Node.newBuilder() to construct.
+      private Node(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+        super(builder);
+        this.unknownFields = builder.getUnknownFields();
+      }
+      private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+      private static final Node defaultInstance;
+      public static Node getDefaultInstance() {
+        return defaultInstance;
+      }
+
+      public Node getDefaultInstanceForType() {
+        return defaultInstance;
+      }
+
+      private final com.google.protobuf.UnknownFieldSet unknownFields;
+      @java.lang.Override
+      public final com.google.protobuf.UnknownFieldSet
+          getUnknownFields() {
+        return this.unknownFields;
+      }
+      /**
+       * Stream-parsing constructor used by {@link #PARSER}: reads tagged
+       * fields off the wire until end-of-message, collecting anything
+       * unrecognized into unknownFields.  Generated by protoc.
+       */
+      private Node(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        initFields();
+        // Parse-local state: bit 0x20 records whether regions_ has been
+        // swapped from the shared empty list to a private ArrayList.
+        int mutable_bitField0_ = 0;
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+            com.google.protobuf.UnknownFieldSet.newBuilder();
+        try {
+          boolean done = false;
+          while (!done) {
+            // Each tag is (field number << 3) | wire type; tag 0 means
+            // end of message / end of stream.
+            int tag = input.readTag();
+            switch (tag) {
+              case 0:
+                done = true;
+                break;
+              // protoc emits the default label before the field cases; Java
+              // switch dispatch is by label value, so the ordering is harmless.
+              default: {
+                if (!parseUnknownField(input, unknownFields,
+                                       extensionRegistry, tag)) {
+                  done = true;
+                }
+                break;
+              }
+              case 10: {  // field 1 (name), length-delimited
+                bitField0_ |= 0x00000001;
+                name_ = input.readBytes();
+                break;
+              }
+              case 16: {  // field 2 (startCode), varint
+                bitField0_ |= 0x00000002;
+                startCode_ = input.readInt64();
+                break;
+              }
+              case 24: {  // field 3 (requests), varint
+                bitField0_ |= 0x00000004;
+                requests_ = input.readInt32();
+                break;
+              }
+              case 32: {  // field 4 (heapSizeMB), varint
+                bitField0_ |= 0x00000008;
+                heapSizeMB_ = input.readInt32();
+                break;
+              }
+              case 40: {  // field 5 (maxHeapSizeMB), varint
+                bitField0_ |= 0x00000010;
+                maxHeapSizeMB_ = input.readInt32();
+                break;
+              }
+              case 50: {  // field 6 (regions), length-delimited sub-message
+                if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+                  // Lazily allocate the mutable list on the first element.
+                  regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+                  mutable_bitField0_ |= 0x00000020;
+                }
+                regions_.add(input.readMessage(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.PARSER, extensionRegistry));
+                break;
+              }
+            }
+          }
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          throw e.setUnfinishedMessage(this);
+        } catch (java.io.IOException e) {
+          throw new com.google.protobuf.InvalidProtocolBufferException(
+              e.getMessage()).setUnfinishedMessage(this);
+        } finally {
+          // Seal the repeated field and attach collected unknown fields even
+          // when parsing fails part-way through (partial message support).
+          if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+            regions_ = java.util.Collections.unmodifiableList(regions_);
+          }
+          this.unknownFields = unknownFields.build();
+          makeExtensionsImmutable();
+        }
+      }
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+      }
+
+      /** Parser singleton that delegates to the stream-parsing constructor. */
+      public static com.google.protobuf.Parser<Node> PARSER =
+          new com.google.protobuf.AbstractParser<Node>() {
+        public Node parsePartialFrom(
+            com.google.protobuf.CodedInputStream input,
+            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+            throws com.google.protobuf.InvalidProtocolBufferException {
+          return new Node(input, extensionRegistry);
+        }
+      };
+
+      @java.lang.Override
+      public com.google.protobuf.Parser<Node> getParserForType() {
+        return PARSER;
+      }
+
+      private int bitField0_;
+      // required string name = 1;
+      public static final int NAME_FIELD_NUMBER = 1;
+      private java.lang.Object name_;
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      public java.lang.String getName() {
+        // name_ holds either a decoded String or the raw ByteString from
+        // parsing; decode on first access.
+        java.lang.Object ref = name_;
+        if (ref instanceof java.lang.String) {
+          return (java.lang.String) ref;
+        } else {
+          com.google.protobuf.ByteString bs = 
+              (com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          // Only cache the decoded form when the bytes were valid UTF-8, so
+          // getNameBytes() can still return the original bytes otherwise.
+          if (bs.isValidUtf8()) {
+            name_ = s;
+          }
+          return s;
+        }
+      }
+      /**
+       * <code>required string name = 1;</code>
+       *
+       * <pre>
+       * name:port
+       * </pre>
+       */
+      public com.google.protobuf.ByteString
+          getNameBytes() {
+        java.lang.Object ref = name_;
+        if (ref instanceof java.lang.String) {
+          // Encode and cache the UTF-8 bytes so repeated calls return the
+          // same ByteString instance.
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          name_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+
+      // optional int64 startCode = 2;
+      public static final int STARTCODE_FIELD_NUMBER = 2;
+      private long startCode_;
+      /**
+       * <code>optional int64 startCode = 2;</code>
+       */
+      public boolean hasStartCode() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional int64 startCode = 2;</code>
+       */
+      public long getStartCode() {
+        return startCode_;
+      }
+
+      // optional int32 requests = 3;
+      public static final int REQUESTS_FIELD_NUMBER = 3;
+      private int requests_;
+      /**
+       * <code>optional int32 requests = 3;</code>
+       */
+      public boolean hasRequests() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional int32 requests = 3;</code>
+       */
+      public int getRequests() {
+        return requests_;
+      }
+
+      // optional int32 heapSizeMB = 4;
+      public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
+      private int heapSizeMB_;
+      /**
+       * <code>optional int32 heapSizeMB = 4;</code>
+       */
+      public boolean hasHeapSizeMB() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional int32 heapSizeMB = 4;</code>
+       */
+      public int getHeapSizeMB() {
+        return heapSizeMB_;
+      }
+
+      // optional int32 maxHeapSizeMB = 5;
+      public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
+      private int maxHeapSizeMB_;
+      /**
+       * <code>optional int32 maxHeapSizeMB = 5;</code>
+       */
+      public boolean hasMaxHeapSizeMB() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional int32 maxHeapSizeMB = 5;</code>
+       */
+      public int getMaxHeapSizeMB() {
+        return maxHeapSizeMB_;
+      }
+
+      // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
+      public static final int REGIONS_FIELD_NUMBER = 6;
+      private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_;
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
+        return regions_;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder> 
+          getRegionsOrBuilderList() {
+        return regions_;
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      public int getRegionsCount() {
+        return regions_.size();
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
+        return regions_.get(index);
+      }
+      /**
+       * <code>repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;</code>
+       */
+      public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder getRegionsOrBuilder(
+          int index) {
+        return regions_.get(index);
+      }
+
+      // Resets every field to its proto2 default: empty string, zero
+      // numerics, shared empty list for the repeated field.
+      private void initFields() {
+        name_ = "";
+        startCode_ = 0L;
+        requests_ = 0;
+        heapSizeMB_ = 0;
+        maxHeapSizeMB_ = 0;
+        regions_ = java.util.Collections.emptyList();
+      }
+      // Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
+      private byte memoizedIsInitialized = -1;
+      public final boolean isInitialized() {
+        byte isInitialized = memoizedIsInitialized;
+        if (isInitialized != -1) return isInitialized == 1;
+
+        // name is the only required field of Node.
+        if (!hasName()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+        // Required fields of nested Region messages must be set too.
+        for (int i = 0; i < getRegionsCount(); i++) {
+          if (!getRegions(i).isInitialized()) {
+            memoizedIsInitialized = 0;
+            return false;
+          }
+        }
+        memoizedIsInitialized = 1;
+        return true;
+      }
+
+      /**
+       * Serializes every set field in ascending field-number order, then any
+       * unknown fields captured at parse time.
+       */
+      public void writeTo(com.google.protobuf.CodedOutputStream output)
+                          throws java.io.IOException {
+        // Computes (and memoizes) the serialized size before writing.
+        getSerializedSize();
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          output.writeBytes(1, getNameBytes());
+        }
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          output.writeInt64(2, startCode_);
+        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          output.writeInt32(3, requests_);
+        }
+        if (((bitField0_ & 0x00000008) == 0x00000008)) {
+          output.writeInt32(4, heapSizeMB_);
+        }
+        if (((bitField0_ & 0x00000010) == 0x00000010)) {
+          output.writeInt32(5, maxHeapSizeMB_);
+        }
+        for (int i = 0; i < regions_.size(); i++) {
+          output.writeMessage(6, regions_.get(i));
+        }
+        getUnknownFields().writeTo(output);
+      }
+
+      // Cached wire size in bytes; -1 means not yet computed.
+      private int memoizedSerializedSize = -1;
+      public int getSerializedSize() {
+        int size = memoizedSerializedSize;
+        if (size != -1) return size;
+
+        // Sum the encoded size of each set field plus any unknown fields.
+        size = 0;
+        if (((bitField0_ & 0x00000001) == 0x00000001)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeBytesSize(1, getNameBytes());
+        }
+        if (((bitField0_ & 0x00000002) == 0x00000002)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt64Size(2, startCode_);
+        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(3, requests_);
+        }
+        if (((bitField0_ & 0x00000008) == 0x00000008)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(4, heapSizeMB_);
+        }
+        if (((bitField0_ & 0x00000010) == 0x00000010)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeInt32Size(5, maxHeapSizeMB_);
+        }
+        for (int i = 0; i < regions_.size(); i++) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeMessageSize(6, regions_.get(i));
+        }
+        size += getUnknownFields().getSerializedSize();
+        memoizedSerializedSize = size;
+        return size;
+      }
+
+      private static final long serialVersionUID = 0L;
+      @java.lang.Override
+      protected java.lang.Object writeReplace()
+          throws java.io.ObjectStreamException {
+        return super.writeReplace();
+      }
+
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          com.google.protobuf.ByteString data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          com.google.protobuf.ByteString data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          byte[] data,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return PARSER.parseFrom(data, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
+          throws java.io.IOException {
+        return PARSER.parseDelimitedFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
+          java.io.InputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseDelimitedFrom(input, extensionRegistry);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          com.google.protobuf.CodedInputStream input)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input);
+      }
+      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        return PARSER.parseFrom(input, extensionRegistry);
+      }
+
+      public static Builder newBuilder() { return Builder.create(); }
+      public Builder newBuilderForType() { return newBuilder(); }
+      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
+        return newBuilder().mergeFrom(prototype);
+      }
+      public Builder toBuilder() { return newBuilder(this); }
+
+      @java.lang.Override
+      protected Builder newBuilderForType(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        Builder builder = new Builder(parent);
+        return builder;
+      }
+      /**
+       * Protobuf type {@code org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node}
+       */
+      public static final class Builder extends
+          com.google.protobuf.GeneratedMessage.Builder<Builder>
+         implements org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder {
+        public static final com.google.protobuf.Descriptors.Descriptor
+            getDescriptor() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+        }
+
+        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+            internalGetFieldAccessorTable() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable
+              .ensureFieldAccessorsInitialized(
+                  org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+        }
+
+        // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
+        private Builder() {
+          maybeForceBuilderInitialization();
+        }
+
+        private Builder(
+            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+          super(parent);
+          maybeForceBuilderInitialization();
+        }
+        private void maybeForceBuilderInitialization() {
+          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+            getRegionsFieldBuilder();
+          }
+        }
+        private static Builder create() {
+          return new Builder();
+        }
+
+        /** Resets every field to its default and clears all has-bits. */
+        public Builder clear() {
+          super.clear();
+          name_ = "";
+          bitField0_ = (bitField0_ & ~0x00000001);
+          startCode_ = 0L;
+          bitField0_ = (bitField0_ & ~0x00000002);
+          requests_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000004);
+          heapSizeMB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000008);
+          maxHeapSizeMB_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000010);
+          // Repeated field: drop our list or clear the delegating builder,
+          // depending on which representation is active.
+          if (regionsBuilder_ == null) {
+            regions_ = java.util.Collections.emptyList();
+            bitField0_ = (bitField0_ & ~0x00000020);
+          } else {
+            regionsBuilder_.clear();
+          }
+          return this;
+        }
+
+        public Builder clone() {
+          return create().mergeFrom(buildPartial());
+        }
+
+        public com.google.protobuf.Descriptors.Descriptor
+            getDescriptorForType() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
+        }
+
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
+          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
+        }
+
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
+          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = buildPartial();
+          if (!result.isInitialized()) {
+            throw newUninitializedMessageException(result);
+          }
+          return result;
+        }
+
+        /**
+         * Builds the message without checking required fields: copies each
+         * field value and translates the builder's has-bits into the
+         * message's has-bits.
+         */
+        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
+          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node(this);
+          int from_bitField0_ = bitField0_;
+          int to_bitField0_ = 0;
+          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+            to_bitField0_ |= 0x00000001;
+          }
+          result.name_ = name_;
+          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+            to_bitField0_ |= 0x00000002;
+          }
+          result.startCode_ = startCode_;
+          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+            to_bitField0_ |= 0x00000004;
+          }
+          result.requests_ = requests_;
+          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+            to_bitField0_ |= 0x00000008;
+          }
+          result.heapSizeMB_ = heapSizeMB_;
+          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+            to_bitField0_ |= 0x00000010;
+          }
+          result.maxHeapSizeMB_ = maxHeapSizeMB_;
+          // Repeated field: seal and hand off our list, or delegate to the
+          // nested field builder when one is in use.
+          if (regionsBuilder_ == null) {
+            if (((bitField0_ & 0x00000020) == 0x00000020)) {
+              regions_ = java.util.Collections.unmodifiableList(regions_);
+              bitField0_ = (bitField0_ & ~0x00000020);
+            }
+            result.regions_ = regions_;
+          } else {
+            result.regions_ = regionsBuilder_.build();
+          }
+          result.bitField0_ = to_bitField0_;
+          onBuilt();
+          return result;
+        }
+
+        /**
+         * Type-dispatching merge: uses the field-aware overload when
+         * {@code other} is a Node, otherwise falls back to the reflective
+         * merge in the superclass.
+         */
+        public Builder mergeFrom(com.google.protobuf.Message other) {
+          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
+            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
+          } else {
+            super.mergeFrom(other);
+            return this;
+          }
+        }
+
+        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
+          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
+          if (other.hasName()) {
+            bitField0_ |= 0x00000001;
+            name_ = other.name_;
+            onChanged();
+          }
+          if (other.hasStartCode()) {
+            setStartCode(other.getStartCode());
+          }
+          if (other.hasRequests()) {
+            setRequests(other.getRequests());
+          }
+          if (other.hasHeapSizeMB()) {
+            setHeapSizeMB(other.getHeapSizeMB());
+          }
+          if (other.hasMaxHeapSizeMB()) {
+            setMaxHeapSizeMB(other.getMaxHeapSizeMB());
+          }
+          if (regionsBuilder_ == null) {
+            if (!other.regions_.isEmpty()) {
+              if (regions_.isEmpty()) {
+                regions_ = other.regions_;
+                bitField0_ = (bitField0_ & ~0x00000020);
+              } else {
+                ensureRegionsIsMutable();
+                regions_.addAll(other.regions_);
+              }
+              onChanged();
+            }
+          } else {
+            if (!other.regions_.isEmpty()) {
+              if (regionsBuilder_.isEmpty()) {
+                regionsBuilder_.dispose();
+                regionsBuilder_ = null;
+                regions_ = other.regions_;
+                bitField0_ = (bitField0_ & ~0x00000020);
+     

<TRUNCATED>

Mime
View raw message