From: stack@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Wed, 21 Aug 2013 05:04:22 -0000
Subject: svn commit: r1516084 [9/43] - in /hbase/trunk: ./ hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ hbase-common/src/test/java/org/apache/hadoop/hbase/ hbase-protocol/src/main/java/org/apac...

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java?rev=1516084&r1=1516083&r2=1516084&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java Wed Aug 21 05:04:20 2013
@@ -10,82 +10,379 @@ public final class ClusterStatusProtos {
   }
   public interface RegionStateOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
-    
+    // required .RegionInfo region_info = 1;
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
     boolean hasRegionInfo();
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
-    
+    // required .RegionState.State state = 2;
+    /**
+     * <code>required .RegionState.State state = 2;</code>
+     */
     boolean hasState();
+    /**
+     * <code>required .RegionState.State state = 2;</code>
+     */
     org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
-    
+    // optional uint64 stamp = 3;
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
     boolean hasStamp();
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
     long getStamp();
   }
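Note for API consumers: RegionStateOrBuilder above is the read-only view protoc generates for the RegionState message; both RegionState and RegionState.Builder implement it, so it is the natural parameter type for code that only reads fields. A minimal sketch, assuming the regenerated hbase-protocol classes are on the classpath; the helper class and method names are invented for illustration:

    import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

    // Illustrative helper, not part of this commit. Accepting the OrBuilder
    // view lets callers pass either a built RegionState or a live Builder.
    final class RegionStateLogger {
      static void log(ClusterStatusProtos.RegionStateOrBuilder rs) {
        if (rs.hasRegionInfo()) {          // required .RegionInfo region_info = 1;
          System.out.println("region_info: " + rs.getRegionInfo());
        }
        System.out.println("state: " + rs.getState());   // defaults to OFFLINE
        if (rs.hasStamp()) {               // optional uint64 stamp = 3;
          System.out.println("stamp: " + rs.getStamp());
        }
      }
    }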
+  /**
+   * Protobuf type {@code RegionState}
+   */
   public static final class RegionState extends
       com.google.protobuf.GeneratedMessage
       implements RegionStateOrBuilder {
     // Use RegionState.newBuilder() to construct.
-    private RegionState(Builder builder) {
+    private RegionState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
+      this.unknownFields = builder.getUnknownFields();
     }
-    private RegionState(boolean noInit) {}
-    
+    private RegionState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
     private static final RegionState defaultInstance;
     public static RegionState getDefaultInstance() {
       return defaultInstance;
     }
-    
+
     public RegionState getDefaultInstanceForType() {
       return defaultInstance;
     }
-    
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionState(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = regionInfo_.toBuilder();
+              }
+              regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(regionInfo_);
+                regionInfo_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                state_ = value;
+              }
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              stamp_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
       return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor;
     }
-    
+
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable;
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RegionState> PARSER =
+        new com.google.protobuf.AbstractParser<RegionState>() {
+      public RegionState parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RegionState(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RegionState> getParserForType() {
+      return PARSER;
+    }
-    
+
+    /**
+     * Protobuf enum {@code RegionState.State}
+     */
     public enum State
         implements com.google.protobuf.ProtocolMessageEnum {
+      /**
+       * <code>OFFLINE = 0;</code>
+       *
+       * <pre>
+       * region is in an offline state
+       * </pre>
+       */
      OFFLINE(0, 0),
+      /**
+       * <code>PENDING_OPEN = 1;</code>
+       *
+       * <pre>
+       * sent rpc to server to open but has not begun
+       * </pre>
+       */
      PENDING_OPEN(1, 1),
+      /**
+       * <code>OPENING = 2;</code>
+       *
+       * <pre>
+       * server has begun to open but not yet done
+       * </pre>
+       */
      OPENING(2, 2),
+      /**
+       * <code>OPEN = 3;</code>
+       *
+       * <pre>
+       * server opened region and updated meta
+       * </pre>
+       */
      OPEN(3, 3),
+      /**
+       * <code>PENDING_CLOSE = 4;</code>
+       *
+       * <pre>
+       * sent rpc to server to close but has not begun
+       * </pre>
+       */
      PENDING_CLOSE(4, 4),
+      /**
+       * <code>CLOSING = 5;</code>
+       *
+       * <pre>
+       * server has begun to close but not yet done
+       * </pre>
+       */
      CLOSING(5, 5),
+      /**
+       * <code>CLOSED = 6;</code>
+       *
+       * <pre>
+       * server closed region and updated meta
+       * </pre>
+       */
      CLOSED(6, 6),
+      /**
+       * <code>SPLITTING = 7;</code>
+       *
+       * <pre>
+       * server started split of a region
+       * </pre>
+       */
      SPLITTING(7, 7),
+      /**
+       * <code>SPLIT = 8;</code>
+       *
+       * <pre>
+       * server completed split of a region
+       * </pre>
+       */
      SPLIT(8, 8),
+      /**
+       * <code>FAILED_OPEN = 9;</code>
+       *
+       * <pre>
+       * failed to open, and won't retry any more
+       * </pre>
+       */
      FAILED_OPEN(9, 9),
+      /**
+       * <code>FAILED_CLOSE = 10;</code>
+       *
+       * <pre>
+       * failed to close, and won't retry any more
+       * </pre>
+       */
      FAILED_CLOSE(10, 10),
+      /**
+       * <code>MERGING = 11;</code>
+       *
+       * <pre>
+       * server started merge of a region
+       * </pre>
+       */
      MERGING(11, 11),
+      /**
+       * <code>MERGED = 12;</code>
+       *
+       * <pre>
+       * server completed merge of a region
+       * </pre>
+       */
      MERGED(12, 12),
      ;
-      
+
+      /**
+       * <code>OFFLINE = 0;</code>
+       *
+       * <pre>
+       * region is in an offline state
+       * </pre>
+       */
      public static final int OFFLINE_VALUE = 0;
+      /**
+       * <code>PENDING_OPEN = 1;</code>
+       *
+       * <pre>
+       * sent rpc to server to open but has not begun
+       * </pre>
+       */
      public static final int PENDING_OPEN_VALUE = 1;
+      /**
+       * <code>OPENING = 2;</code>
+       *
+       * <pre>
+       * server has begun to open but not yet done
+       * </pre>
+       */
      public static final int OPENING_VALUE = 2;
+      /**
+       * <code>OPEN = 3;</code>
+       *
+       * <pre>
+       * server opened region and updated meta
+       * </pre>
+       */
      public static final int OPEN_VALUE = 3;
+      /**
+       * <code>PENDING_CLOSE = 4;</code>
+       *
+       * <pre>
+       * sent rpc to server to close but has not begun
+       * </pre>
+       */
      public static final int PENDING_CLOSE_VALUE = 4;
+      /**
+       * <code>CLOSING = 5;</code>
+       *
+       * <pre>
+       * server has begun to close but not yet done
+       * </pre>
+       */
      public static final int CLOSING_VALUE = 5;
+      /**
+       * <code>CLOSED = 6;</code>
+       *
+       * <pre>
+       * server closed region and updated meta
+       * </pre>
+       */
      public static final int CLOSED_VALUE = 6;
+      /**
+       * <code>SPLITTING = 7;</code>
+       *
+       * <pre>
+       * server started split of a region
+       * </pre>
+       */
      public static final int SPLITTING_VALUE = 7;
+      /**
+       * <code>SPLIT = 8;</code>
+       *
+       * <pre>
+       * server completed split of a region
+       * </pre>
+       */
      public static final int SPLIT_VALUE = 8;
+      /**
+       * <code>FAILED_OPEN = 9;</code>
+       *
+       * <pre>
+       * failed to open, and won't retry any more
+       * </pre>
+       */
      public static final int FAILED_OPEN_VALUE = 9;
+      /**
+       * <code>FAILED_CLOSE = 10;</code>
+       *
+       * <pre>
+       * failed to close, and won't retry any more
+       * </pre>
+       */
      public static final int FAILED_CLOSE_VALUE = 10;
+      /**
+       * <code>MERGING = 11;</code>
+       *
+       * <pre>
+       * server started merge of a region
+       * </pre>
+       */
      public static final int MERGING_VALUE = 11;
+      /**
+       * <code>MERGED = 12;</code>
+       *
+       * <pre>
+       * server completed merge of a region
+       * </pre>
+       */
      public static final int MERGED_VALUE = 12;
-      
-      
+
+
      public final int getNumber() { return value; }
-      
+
      public static State valueOf(int value) {
        switch (value) {
          case 0: return OFFLINE;
@@ -104,7 +401,7 @@ public final class ClusterStatusProtos {
          default: return null;
        }
      }
-      
+
      public static com.google.protobuf.Internal.EnumLiteMap<State>
          internalGetValueMap() {
        return internalValueMap;
@@ -116,7 +413,7 @@ public final class ClusterStatusProtos {
              return State.valueOf(number);
            }
          };
-      
+
      public final com.google.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(index);
@@ -129,11 +426,9 @@ public final class ClusterStatusProtos {
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDescriptor().getEnumTypes().get(0);
      }
-      
-      private static final State[] VALUES = {
-        OFFLINE, PENDING_OPEN, OPENING, OPEN, PENDING_CLOSE, CLOSING, CLOSED, SPLITTING, SPLIT, FAILED_OPEN, FAILED_CLOSE, MERGING, MERGED, 
-      };
-      
+
+      private static final State[] VALUES = values();
+
      public static State valueOf(
          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
@@ -142,52 +437,73 @@ public final class ClusterStatusProtos {
        }
        return VALUES[desc.getIndex()];
      }
-      
+
      private final int index;
      private final int value;
-      
+
      private State(int index, int value) {
        this.index = index;
        this.value = value;
      }
-      
+
      // @@protoc_insertion_point(enum_scope:RegionState.State)
    }
-    
+
    private int bitField0_;
    // required .RegionInfo region_info = 1;
    public static final int REGION_INFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
    public boolean hasRegionInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
      return regionInfo_;
    }
+    /**
+     * <code>required .RegionInfo region_info = 1;</code>
+     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
      return regionInfo_;
    }
-    
+
    // required .RegionState.State state = 2;
    public static final int STATE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
+    /**
+     * <code>required .RegionState.State state = 2;</code>
+     */
    public boolean hasState() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
+    /**
+     * <code>required .RegionState.State state = 2;</code>
+     */
    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
      return state_;
    }
-    
+
    // optional uint64 stamp = 3;
    public static final int STAMP_FIELD_NUMBER = 3;
    private long stamp_;
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
    public boolean hasStamp() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
+    /**
+     * <code>optional uint64 stamp = 3;</code>
+     */
    public long getStamp() {
      return stamp_;
    }
-    
+
    private void initFields() {
      regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
      state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
@@ -197,7 +513,7 @@ public final class ClusterStatusProtos {
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
-      
+
      if (!hasRegionInfo()) {
        memoizedIsInitialized = 0;
        return false;
@@ -213,7 +529,7 @@
memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -228,12 +544,12 @@ public final class ClusterStatusProtos { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -251,14 +567,14 @@ public final class ClusterStatusProtos { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -268,7 +584,7 @@ public final class ClusterStatusProtos { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) obj; - + boolean result = true; result = result && (hasRegionInfo() == other.hasRegionInfo()); if (hasRegionInfo()) { @@ -289,9 +605,13 @@ public final class ClusterStatusProtos { getUnknownFields().equals(other.getUnknownFields()); return result; } - + + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionInfo()) { @@ -307,89 +627,79 @@ public final class ClusterStatusProtos { hash = (53 * hash) + hashLong(getStamp()); } hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; return hash; } - + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code RegionState} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder { @@ -397,18 +707,21 @@ public final class ClusterStatusProtos { getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class); } - + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -420,7 +733,7 @@ public final class 
ClusterStatusProtos { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (regionInfoBuilder_ == null) { @@ -435,20 +748,20 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor; } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance(); } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState build() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = buildPartial(); if (!result.isInitialized()) { @@ -456,17 +769,7 @@ public final class ClusterStatusProtos { } return result; } - - private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState(this); int from_bitField0_ = bitField0_; @@ -491,7 +794,7 @@ public final class ClusterStatusProtos { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState)other); @@ -500,7 +803,7 @@ public final class ClusterStatusProtos { return this; } } - + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) return this; if (other.hasRegionInfo()) { @@ -515,7 +818,7 @@ public final class ClusterStatusProtos { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasRegionInfo()) { @@ -531,68 +834,39 @@ public final class ClusterStatusProtos { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(); - if (hasRegionInfo()) { - subBuilder.mergeFrom(getRegionInfo()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegionInfo(subBuilder.buildPartial()); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - state_ = value; - } - break; - } - case 24: { - bitField0_ |= 0x00000004; - stamp_ = input.readUInt64(); - break; - } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required .RegionInfo region_info = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .RegionInfo region_info = 1; + */ public boolean hasRegionInfo() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required .RegionInfo region_info = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { if (regionInfoBuilder_ == null) { return regionInfo_; @@ -600,6 +874,9 @@ public final class ClusterStatusProtos { return regionInfoBuilder_.getMessage(); } } + /** + * required .RegionInfo region_info = 1; + */ public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionInfoBuilder_ == null) { if (value == null) { @@ -613,6 +890,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionInfo region_info = 1; + */ public Builder setRegionInfo( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionInfoBuilder_ == null) { @@ -624,6 +904,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionInfo region_info = 1; + */ public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && @@ -640,6 +923,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionInfo region_info = 1; + */ public Builder clearRegionInfo() { if (regionInfoBuilder_ == null) { regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); @@ -650,11 +936,17 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000001); 
return this; } + /** + * required .RegionInfo region_info = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getRegionInfoFieldBuilder().getBuilder(); } + /** + * required .RegionInfo region_info = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { if (regionInfoBuilder_ != null) { return regionInfoBuilder_.getMessageOrBuilder(); @@ -662,6 +954,9 @@ public final class ClusterStatusProtos { return regionInfo_; } } + /** + * required .RegionInfo region_info = 1; + */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionInfoFieldBuilder() { @@ -675,15 +970,24 @@ public final class ClusterStatusProtos { } return regionInfoBuilder_; } - + // required .RegionState.State state = 2; private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE; + /** + * required .RegionState.State state = 2; + */ public boolean hasState() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * required .RegionState.State state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() { return state_; } + /** + * required .RegionState.State state = 2; + */ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value) { if (value == null) { throw new NullPointerException(); @@ -693,113 +997,252 @@ public final class ClusterStatusProtos { onChanged(); return this; } + /** + * required .RegionState.State state = 2; + */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000002); state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE; onChanged(); return this; } - + // optional uint64 stamp = 3; private long stamp_ ; + /** + * optional uint64 stamp = 3; + */ public boolean hasStamp() { return ((bitField0_ & 0x00000004) == 0x00000004); } + /** + * optional uint64 stamp = 3; + */ public long getStamp() { return stamp_; } + /** + * optional uint64 stamp = 3; + */ public Builder setStamp(long value) { bitField0_ |= 0x00000004; stamp_ = value; onChanged(); return this; } + /** + * optional uint64 stamp = 3; + */ public Builder clearStamp() { bitField0_ = (bitField0_ & ~0x00000004); stamp_ = 0L; onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:RegionState) } - + static { defaultInstance = new RegionState(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RegionState) } - + public interface RegionInTransitionOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .RegionSpecifier spec = 1; + /** + * required .RegionSpecifier spec = 1; + */ boolean hasSpec(); + /** + * required .RegionSpecifier spec = 1; + */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec(); + /** + * required .RegionSpecifier spec = 1; + */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder(); - + // required .RegionState region_state = 2; + /** + * required .RegionState region_state = 2; + */ boolean hasRegionState(); + /** + * 
required .RegionState region_state = 2; + */ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState(); + /** + * required .RegionState region_state = 2; + */ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder(); } + /** + * Protobuf type {@code RegionInTransition} + */ public static final class RegionInTransition extends com.google.protobuf.GeneratedMessage implements RegionInTransitionOrBuilder { // Use RegionInTransition.newBuilder() to construct. - private RegionInTransition(Builder builder) { + private RegionInTransition(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private RegionInTransition(boolean noInit) {} - + private RegionInTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final RegionInTransition defaultInstance; public static RegionInTransition getDefaultInstance() { return defaultInstance; } - + public RegionInTransition getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionInTransition( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = spec_.toBuilder(); + } + spec_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(spec_); + spec_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionState_.toBuilder(); + } + regionState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionState_); + regionState_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor; } - + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionInTransition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionInTransition(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - + private int bitField0_; // required .RegionSpecifier spec = 1; public static final int SPEC_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_; + /** + * required .RegionSpecifier spec = 1; + */ public boolean hasSpec() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required .RegionSpecifier spec = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() { return spec_; } + /** + * required .RegionSpecifier spec = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() { return spec_; } - + // required .RegionState region_state = 2; public static final int REGION_STATE_FIELD_NUMBER = 2; private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_; + /** + * required .RegionState region_state = 2; + */ public boolean hasRegionState() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * required .RegionState region_state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() { return regionState_; } + /** + * required .RegionState region_state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() { return regionState_; } - + private void initFields() { spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance(); @@ -808,7 +1251,7 @@ public final class ClusterStatusProtos { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasSpec()) { memoizedIsInitialized = 0; return false; @@ -828,7 +1271,7 @@ public final class ClusterStatusProtos { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -840,12 +1283,12 @@ public final class ClusterStatusProtos { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -859,14 +1302,14 @@ public final 
class ClusterStatusProtos { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -876,7 +1319,7 @@ public final class ClusterStatusProtos { return super.equals(obj); } org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) obj; - + boolean result = true; result = result && (hasSpec() == other.hasSpec()); if (hasSpec()) { @@ -892,9 +1335,13 @@ public final class ClusterStatusProtos { getUnknownFields().equals(other.getUnknownFields()); return result; } - + + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSpec()) { @@ -906,89 +1353,79 @@ public final class ClusterStatusProtos { hash = (53 * hash) + getRegionState().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; return hash; } - + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return 
PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code RegionInTransition} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder { @@ -996,18 +1433,21 @@ public final class ClusterStatusProtos { getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class); } - + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1020,7 +1460,7 @@ public final class ClusterStatusProtos { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (specBuilder_ == null) { @@ -1037,20 +1477,20 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor; } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getDefaultInstanceForType() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance(); } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition build() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = buildPartial(); if (!result.isInitialized()) { @@ -1058,17 +1498,7 @@ public final class ClusterStatusProtos { } return result; } - - private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition(this); int from_bitField0_ = bitField0_; @@ -1093,7 +1523,7 @@ public final class ClusterStatusProtos { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) { return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition)other); @@ -1102,7 +1532,7 @@ public final class ClusterStatusProtos { return this; } } - + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance()) return this; if (other.hasSpec()) { @@ -1114,7 +1544,7 @@ public final class ClusterStatusProtos { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasSpec()) { @@ -1134,61 +1564,39 @@ public final class ClusterStatusProtos { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(); - if (hasSpec()) { - subBuilder.mergeFrom(getSpec()); - } - input.readMessage(subBuilder, extensionRegistry); - setSpec(subBuilder.buildPartial()); - break; - } - case 18: { - 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder(); - if (hasRegionState()) { - subBuilder.mergeFrom(getRegionState()); - } - input.readMessage(subBuilder, extensionRegistry); - setRegionState(subBuilder.buildPartial()); - break; - } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required .RegionSpecifier spec = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> specBuilder_; + /** + * required .RegionSpecifier spec = 1; + */ public boolean hasSpec() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required .RegionSpecifier spec = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() { if (specBuilder_ == null) { return spec_; @@ -1196,6 +1604,9 @@ public final class ClusterStatusProtos { return specBuilder_.getMessage(); } } + /** + * required .RegionSpecifier spec = 1; + */ public Builder setSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { if (specBuilder_ == null) { if (value == null) { @@ -1209,6 +1620,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionSpecifier spec = 1; + */ public Builder setSpec( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { if (specBuilder_ == null) { @@ -1220,6 +1634,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionSpecifier spec = 1; + */ public Builder mergeSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { if (specBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && @@ -1236,6 +1653,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000001; return this; } + /** + * required .RegionSpecifier spec = 1; + */ public Builder clearSpec() { if (specBuilder_ == null) { spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); @@ -1246,11 +1666,17 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000001); return this; } + /** + * required .RegionSpecifier spec = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getSpecBuilder() { bitField0_ |= 0x00000001; onChanged(); return getSpecFieldBuilder().getBuilder(); } + /** + * required .RegionSpecifier spec = 1; + */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() { if (specBuilder_ != null) { return 
specBuilder_.getMessageOrBuilder(); @@ -1258,6 +1684,9 @@ public final class ClusterStatusProtos { return spec_; } } + /** + * required .RegionSpecifier spec = 1; + */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> getSpecFieldBuilder() { @@ -1271,14 +1700,20 @@ public final class ClusterStatusProtos { } return specBuilder_; } - + // required .RegionState region_state = 2; private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> regionStateBuilder_; + /** + * required .RegionState region_state = 2; + */ public boolean hasRegionState() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * required .RegionState region_state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() { if (regionStateBuilder_ == null) { return regionState_; @@ -1286,6 +1721,9 @@ public final class ClusterStatusProtos { return regionStateBuilder_.getMessage(); } } + /** + * required .RegionState region_state = 2; + */ public Builder setRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) { if (regionStateBuilder_ == null) { if (value == null) { @@ -1299,6 +1737,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000002; return this; } + /** + * required .RegionState region_state = 2; + */ public Builder setRegionState( org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder builderForValue) { if (regionStateBuilder_ == null) { @@ -1310,6 +1751,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000002; return this; } + /** + * required .RegionState region_state = 2; + */ public Builder mergeRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) { if (regionStateBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && @@ -1326,6 +1770,9 @@ public final class ClusterStatusProtos { bitField0_ |= 0x00000002; return this; } + /** + * required .RegionState region_state = 2; + */ public Builder clearRegionState() { if (regionStateBuilder_ == null) { regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance(); @@ -1336,11 +1783,17 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000002); return this; } + /** + * required .RegionState region_state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder getRegionStateBuilder() { bitField0_ |= 0x00000002; onChanged(); return getRegionStateFieldBuilder().getBuilder(); } + /** + * required .RegionState region_state = 2; + */ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() { if (regionStateBuilder_ != null) { return regionStateBuilder_.getMessageOrBuilder(); @@ -1348,6 +1801,9 @@ public final class ClusterStatusProtos { return 
regionState_; } } + /** + * required .RegionState region_state = 2; + */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> getRegionStateFieldBuilder() { @@ -1361,264 +1817,859 @@ public final class ClusterStatusProtos { } return regionStateBuilder_; } - + // @@protoc_insertion_point(builder_scope:RegionInTransition) } - + static { defaultInstance = new RegionInTransition(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RegionInTransition) } - + public interface RegionLoadOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .RegionSpecifier region_specifier = 1; + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ boolean hasRegionSpecifier(); + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier(); + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder(); - + // optional uint32 stores = 2; + /** + * optional uint32 stores = 2; + * + *
+     * <pre>
+     ** the number of stores for the region 
+     * </pre>
+ */ boolean hasStores(); + /** + * optional uint32 stores = 2; + * + *
+     * <pre>
+     ** the number of stores for the region 
+     * </pre>
+ */ int getStores(); - + // optional uint32 storefiles = 3; + /** + * optional uint32 storefiles = 3; + * + *
+     * <pre>
+     ** the number of storefiles for the region 
+     * </pre>
+ */ boolean hasStorefiles(); + /** + * optional uint32 storefiles = 3; + * + *
+     * <pre>
+     ** the number of storefiles for the region 
+     * </pre>
+ */ int getStorefiles(); - + // optional uint32 store_uncompressed_size_MB = 4; + /** + * optional uint32 store_uncompressed_size_MB = 4; + * + *
+     * <pre>
+     ** the total size of the store files for the region, uncompressed, in MB 
+     * </pre>
+ */ boolean hasStoreUncompressedSizeMB(); + /** + * optional uint32 store_uncompressed_size_MB = 4; + * + *
+     * <pre>
+     ** the total size of the store files for the region, uncompressed, in MB 
+     * </pre>
+ */ int getStoreUncompressedSizeMB(); - + // optional uint32 storefile_size_MB = 5; + /** + * optional uint32 storefile_size_MB = 5; + * + *
+     * <pre>
+     ** the current total size of the store files for the region, in MB 
+     * </pre>
+ */ boolean hasStorefileSizeMB(); + /** + * optional uint32 storefile_size_MB = 5; + * + *
+     * <pre>
+     ** the current total size of the store files for the region, in MB 
+     * </pre>
+ */ int getStorefileSizeMB(); - + // optional uint32 memstore_size_MB = 6; + /** + * optional uint32 memstore_size_MB = 6; + * + *
+     * <pre>
+     ** the current size of the memstore for the region, in MB 
+     * </pre>
+ */ boolean hasMemstoreSizeMB(); + /** + * optional uint32 memstore_size_MB = 6; + * + *
+     * <pre>
+     ** the current size of the memstore for the region, in MB 
+     * </pre>
+ */ int getMemstoreSizeMB(); - + // optional uint32 storefile_index_size_MB = 7; + /** + * optional uint32 storefile_index_size_MB = 7; + * + *
+     * <pre>
+     **
+     * The current total size of root-level store file indexes for the region,
+     * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+     * </pre>
+ */ boolean hasStorefileIndexSizeMB(); + /** + * optional uint32 storefile_index_size_MB = 7; + * + *
+     * <pre>
+     **
+     * The current total size of root-level store file indexes for the region,
+     * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+     * </pre>
+ */ int getStorefileIndexSizeMB(); - + // optional uint64 read_requests_count = 8; + /** + * optional uint64 read_requests_count = 8; + * + *
+     * <pre>
+     ** the current total read requests made to region 
+     * </pre>
+ */ boolean hasReadRequestsCount(); + /** + * optional uint64 read_requests_count = 8; + * + *
+     * <pre>
+     ** the current total read requests made to region 
+     * </pre>
+ */ long getReadRequestsCount(); - + // optional uint64 write_requests_count = 9; + /** + * optional uint64 write_requests_count = 9; + * + *
+     * <pre>
+     ** the current total write requests made to region 
+     * </pre>
+ */ boolean hasWriteRequestsCount(); + /** + * optional uint64 write_requests_count = 9; + * + *
+     * <pre>
+     ** the current total write requests made to region 
+     * </pre>
+ */ long getWriteRequestsCount(); - + // optional uint64 total_compacting_KVs = 10; + /** + * optional uint64 total_compacting_KVs = 10; + * + *
+     * <pre>
+     ** the total compacting key values in currently running compaction 
+     * </pre>
+ */ boolean hasTotalCompactingKVs(); + /** + * optional uint64 total_compacting_KVs = 10; + * + *
+     * <pre>
+     ** the total compacting key values in currently running compaction 
+     * </pre>
+ */ long getTotalCompactingKVs(); - + // optional uint64 current_compacted_KVs = 11; + /** + * optional uint64 current_compacted_KVs = 11; + * + *
+     * <pre>
+     ** the completed count of key values in currently running compaction 
+     * </pre>
+ */ boolean hasCurrentCompactedKVs(); + /** + * optional uint64 current_compacted_KVs = 11; + * + *
+     * <pre>
+     ** the completed count of key values in currently running compaction 
+     * </pre>
+ */ long getCurrentCompactedKVs(); - + // optional uint32 root_index_size_KB = 12; + /** + * optional uint32 root_index_size_KB = 12; + * + *
+     * <pre>
+     ** The current total size of root-level indexes for the region, in KB. 
+     * </pre>
+ */ boolean hasRootIndexSizeKB(); + /** + * optional uint32 root_index_size_KB = 12; + * + *
+     * <pre>
+     ** The current total size of root-level indexes for the region, in KB. 
+     * </pre>
+ */ int getRootIndexSizeKB(); - + // optional uint32 total_static_index_size_KB = 13; + /** + * optional uint32 total_static_index_size_KB = 13; + * + *
+     * <pre>
+     ** The total size of all index blocks, not just the root level, in KB. 
+     * </pre>
+ */ boolean hasTotalStaticIndexSizeKB(); + /** + * optional uint32 total_static_index_size_KB = 13; + * + *
+     * <pre>
+     ** The total size of all index blocks, not just the root level, in KB. 
+     * </pre>
+ */ int getTotalStaticIndexSizeKB(); - + // optional uint32 total_static_bloom_size_KB = 14; + /** + * optional uint32 total_static_bloom_size_KB = 14; + * + *
+     * <pre>
+     **
+     * The total size of all Bloom filter blocks, not just loaded into the
+     * block cache, in KB.
+     * </pre>
+ */ boolean hasTotalStaticBloomSizeKB(); + /** + * optional uint32 total_static_bloom_size_KB = 14; + * + *
+     * <pre>
+     **
+     * The total size of all Bloom filter blocks, not just loaded into the
+     * block cache, in KB.
+     * </pre>
+ */ int getTotalStaticBloomSizeKB(); - + // optional uint64 complete_sequence_id = 15; + /** + * optional uint64 complete_sequence_id = 15; + * + *
+     * <pre>
+     ** the most recent sequence Id from cache flush 
+     * </pre>
+ */ boolean hasCompleteSequenceId(); + /** + * optional uint64 complete_sequence_id = 15; + * + *
+     * <pre>
+     ** the most recent sequence Id from cache flush 
+     * </pre>
+ */ long getCompleteSequenceId(); } + /** + * Protobuf type {@code RegionLoad} + */ public static final class RegionLoad extends com.google.protobuf.GeneratedMessage implements RegionLoadOrBuilder { // Use RegionLoad.newBuilder() to construct. - private RegionLoad(Builder builder) { + private RegionLoad(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private RegionLoad(boolean noInit) {} - + private RegionLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final RegionLoad defaultInstance; public static RegionLoad getDefaultInstance() { return defaultInstance; } - + public RegionLoad getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionLoad( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = regionSpecifier_.toBuilder(); + } + regionSpecifier_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionSpecifier_); + regionSpecifier_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + stores_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + storefiles_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + storeUncompressedSizeMB_ = input.readUInt32(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + storefileSizeMB_ = input.readUInt32(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + memstoreSizeMB_ = input.readUInt32(); + break; + } + case 56: { + bitField0_ |= 0x00000040; + storefileIndexSizeMB_ = input.readUInt32(); + break; + } + case 64: { + bitField0_ |= 0x00000080; + readRequestsCount_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000100; + writeRequestsCount_ = input.readUInt64(); + break; + } + case 80: { + bitField0_ |= 0x00000200; + totalCompactingKVs_ = input.readUInt64(); + break; + } + case 88: { + bitField0_ |= 0x00000400; + currentCompactedKVs_ = input.readUInt64(); + break; + } + case 96: { + bitField0_ |= 0x00000800; + rootIndexSizeKB_ = input.readUInt32(); + break; + } + case 104: { + bitField0_ |= 0x00001000; + totalStaticIndexSizeKB_ = input.readUInt32(); + break; + } + case 112: { + bitField0_ |= 0x00002000; + totalStaticBloomSizeKB_ = input.readUInt32(); + break; + } + case 120: { + bitField0_ |= 0x00004000; + completeSequenceId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionLoad parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionLoad(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - + private int bitField0_; // required .RegionSpecifier region_specifier = 1; public static final int REGION_SPECIFIER_FIELD_NUMBER = 1; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_; + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ public boolean hasRegionSpecifier() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() { return regionSpecifier_; } + /** + * required .RegionSpecifier region_specifier = 1; + * + *
+     * <pre>
+     ** the region specifier 
+     * </pre>
+ */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() { return regionSpecifier_; } - + // optional uint32 stores = 2; public static final int STORES_FIELD_NUMBER = 2; private int stores_; + /** + * optional uint32 stores = 2; + * + *
+     * <pre>
+     ** the number of stores for the region 
+     * </pre>
+ */ public boolean hasStores() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * optional uint32 stores = 2; + * + *
+     * <pre>
+     ** the number of stores for the region 
+     * </pre>
+ */ public int getStores() { return stores_; } - + // optional uint32 storefiles = 3; public static final int STOREFILES_FIELD_NUMBER = 3; private int storefiles_; + /** + * optional uint32 storefiles = 3; + * + *
+     * <pre>
+     ** the number of storefiles for the region 
+     * </pre>
+ */ public boolean hasStorefiles() { return ((bitField0_ & 0x00000004) == 0x00000004); } + /** + * optional uint32 storefiles = 3; + * + *
+     * <pre>
+     ** the number of storefiles for the region 
+     * </pre>
+ */ public int getStorefiles() { return storefiles_; } - + // optional uint32 store_uncompressed_size_MB = 4; public static final int STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER = 4; private int storeUncompressedSizeMB_; + /** + * optional uint32 store_uncompressed_size_MB = 4; + * + *
+     * <pre>
+     ** the total size of the store files for the region, uncompressed, in MB 
+     * </pre>
+ */ public boolean hasStoreUncompressedSizeMB() { return ((bitField0_ & 0x00000008) == 0x00000008); } + /** + * optional uint32 store_uncompressed_size_MB = 4; + * + *
+     * <pre>
+     ** the total size of the store files for the region, uncompressed, in MB 
+     * </pre>
+ */ public int getStoreUncompressedSizeMB() { return storeUncompressedSizeMB_; } - + // optional uint32 storefile_size_MB = 5; public static final int STOREFILE_SIZE_MB_FIELD_NUMBER = 5; private int storefileSizeMB_; + /** + * optional uint32 storefile_size_MB = 5; + * + *
+     * <pre>
+     ** the current total size of the store files for the region, in MB 
+     * </pre>
+ */ public boolean hasStorefileSizeMB() { return ((bitField0_ & 0x00000010) == 0x00000010); } + /** + * optional uint32 storefile_size_MB = 5; + * + *
+     * <pre>
+     ** the current total size of the store files for the region, in MB 
+     * </pre>
+ */ public int getStorefileSizeMB() { return storefileSizeMB_; } - + // optional uint32 memstore_size_MB = 6; public static final int MEMSTORE_SIZE_MB_FIELD_NUMBER = 6; private int memstoreSizeMB_; + /** + * optional uint32 memstore_size_MB = 6; + * + *
+     * <pre>
+     ** the current size of the memstore for the region, in MB 
+     * </pre>
+ */ public boolean hasMemstoreSizeMB() { return ((bitField0_ & 0x00000020) == 0x00000020); } + /** + * optional uint32 memstore_size_MB = 6; + * + *
+     * <pre>
+     ** the current size of the memstore for the region, in MB 
+     * </pre>
+ */ public int getMemstoreSizeMB() { return memstoreSizeMB_; } - + // optional uint32 storefile_index_size_MB = 7; public static final int STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER = 7; private int storefileIndexSizeMB_; + /** + * optional uint32 storefile_index_size_MB = 7; + * + *
+     * <pre>
+     **
+     * The current total size of root-level store file indexes for the region,
+     * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+     * </pre>
+ */ public boolean hasStorefileIndexSizeMB() { return ((bitField0_ & 0x00000040) == 0x00000040); } + /** + * optional uint32 storefile_index_size_MB = 7; + * + *
+     * <pre>
+     **
+     * The current total size of root-level store file indexes for the region,
+     * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+     * </pre>
+ */ public int getStorefileIndexSizeMB() { return storefileIndexSizeMB_; } - + // optional uint64 read_requests_count = 8; public static final int READ_REQUESTS_COUNT_FIELD_NUMBER = 8; private long readRequestsCount_; + /** + * optional uint64 read_requests_count = 8; + * + *
+     * <pre>
+     ** the current total read requests made to region 
+     * </pre>
+ */ public boolean hasReadRequestsCount() { return ((bitField0_ & 0x00000080) == 0x00000080); } + /** + * optional uint64 read_requests_count = 8; + * + *
+     * <pre>
+     ** the current total read requests made to region 
+     * </pre>
+ */ public long getReadRequestsCount() { return readRequestsCount_; } - + // optional uint64 write_requests_count = 9; public static final int WRITE_REQUESTS_COUNT_FIELD_NUMBER = 9; private long writeRequestsCount_; + /** [... 6670 lines stripped ...]
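A note for readers of this generated diff: the bitField0_ masks and hasXxx() accessors above implement optional-field presence tracking, and the generated Builder is the only way client code assembles these messages. Below is a minimal sketch of driving the RegionLoad API end to end; it assumes HBaseProtos.RegionSpecifier's required type/value fields, and the region name string is invented purely for illustration.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;

public class RegionLoadSketch {
  public static void main(String[] args) throws Exception {
    // Invented region name, purely for illustration.
    RegionSpecifier spec = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFromUtf8("t1,,1377042000000.deadbeef."))
        .build();

    // region_specifier is the only required field; every stat is optional.
    RegionLoad load = RegionLoad.newBuilder()
        .setRegionSpecifier(spec)
        .setStores(3)
        .setStorefiles(12)
        .setStorefileSizeMB(512)
        .setReadRequestsCount(100000L)
        .build();

    // Round-trip through the wire format that the parsing constructor decodes.
    RegionLoad copy = RegionLoad.parseFrom(load.toByteArray());

    // hasXxx() consults the presence bit recorded in bitField0_, so an
    // unset optional field is distinguishable from an explicit zero.
    System.out.println(copy.hasStores() + " " + copy.getStores()); // true 3
    System.out.println(copy.hasMemstoreSizeMB());                  // false
  }
}

Note that buildPartial() skips the required-field check that build() performs, which is why the merge and parse paths in this file use it when assembling possibly incomplete messages.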
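The case labels in RegionLoad's parsing constructor (10, 16, 24, ..., 120) follow directly from the protobuf encoding rule tag = (field_number << 3) | wire_type: wire type 2 (length-delimited) for the embedded region_specifier message, wire type 0 (varint) for every numeric field. A self-contained check of that arithmetic, independent of any HBase code:

public class TagMath {
  // Wire types from the protobuf encoding spec.
  static final int VARINT = 0;
  static final int LENGTH_DELIMITED = 2;

  static int tag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }

  public static void main(String[] args) {
    // Matches the switch in RegionLoad's parsing constructor above.
    System.out.println(tag(1, LENGTH_DELIMITED)); // 10  -> region_specifier
    System.out.println(tag(2, VARINT));           // 16  -> stores
    System.out.println(tag(3, VARINT));           // 24  -> storefiles
    System.out.println(tag(15, VARINT));          // 120 -> complete_sequence_id
  }
}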
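Both parsing paths added in this diff, Builder.mergeFrom for RegionInTransition and the PARSER plumbing for RegionLoad, attach the partially decoded message to the InvalidProtocolBufferException before rethrowing. A sketch of how a caller could take advantage of that; the lenient-parse helper is hypothetical, and the recovered message may be missing required fields:

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;

public class PartialParseSketch {
  // Hypothetical helper: salvage whatever fields were decoded before failure.
  static RegionLoad parseLeniently(byte[] bytes) {
    try {
      return RegionLoad.PARSER.parseFrom(bytes);
    } catch (InvalidProtocolBufferException e) {
      // setUnfinishedMessage(this) in the parsing constructor makes the
      // half-built message available here; it may violate 'required'.
      return (RegionLoad) e.getUnfinishedMessage();
    }
  }

  public static void main(String[] args) {
    // Valid varint field but no required region_specifier, so a strict
    // parse fails: tag 16 is field 2 (stores), value 3.
    byte[] truncated = new byte[] { 0x10, 0x03 };
    RegionLoad load = parseLeniently(truncated);
    System.out.println(load.hasStores() + " " + load.getStores()); // true 3
  }
}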