From: apurtell@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Date: Thu, 12 Feb 2015 22:50:16 -0000
In-Reply-To: <4a37be23deff4bea9a225424e745d40d@git.apache.org>
References: <4a37be23deff4bea9a225424e745d40d@git.apache.org>
Subject: [2/3] hbase git commit: HBASE-9531 a command line (hbase shell)
 interface to retrieve the replication metrics and show replication lag

http://git-wip-us.apache.org/repos/asf/hbase/blob/16ed3451/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
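[For reference: the generated code below implies the following additions to
ClusterStatus.proto. This is a reconstruction, not part of the patch; field
names, numbers, and types are taken directly from the generated accessors in
this diff, and the .proto change itself presumably lands in a companion
patch of this 3-part series:

  message ReplicationLoadSink {
    required uint64 ageOfLastAppliedOp = 1;
    required uint64 timeStampsOfLastAppliedOp = 2;
  }

  message ReplicationLoadSource {
    required string peerID = 1;
    required uint64 ageOfLastShippedOp = 2;
    required uint32 sizeOfLogQueue = 3;
    required uint64 timeStampOfLastShippedOp = 4;
    required uint64 replicationLag = 5;
  }
]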
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 6dc48fa..0d69d7a 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -4438,273 +4438,48 @@ public final class ClusterStatusProtos {
     // @@protoc_insertion_point(class_scope:RegionLoad)
   }
 
-  public interface ServerLoadOrBuilder
+  public interface ReplicationLoadSinkOrBuilder
     extends com.google.protobuf.MessageOrBuilder {
 
-    // optional uint32 number_of_requests = 1;
-    /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
-     */
-    boolean hasNumberOfRequests();
-    /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
-     */
-    int getNumberOfRequests();
-
-    // optional uint32 total_number_of_requests = 2;
-    /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
-     */
-    boolean hasTotalNumberOfRequests();
-    /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
-     */
-    int getTotalNumberOfRequests();
-
-    // optional uint32 used_heap_MB = 3;
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    boolean hasUsedHeapMB();
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    int getUsedHeapMB();
-
-    // optional uint32 max_heap_MB = 4;
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    boolean hasMaxHeapMB();
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    int getMaxHeapMB();
-
-    // repeated .RegionLoad region_loads = 5;
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> 
-        getRegionLoadsList();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index);
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    int getRegionLoadsCount();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
-        getRegionLoadsOrBuilderList();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
-        int index);
-
-    // repeated .Coprocessor coprocessors = 6;
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> 
-        getCoprocessorsList();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index);
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    int getCoprocessorsCount();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
-        getCoprocessorsOrBuilderList();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
-        int index);
-
-    // optional uint64 report_start_time = 7;
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    boolean hasReportStartTime();
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    long getReportStartTime();
-
-    // optional uint64 report_end_time = 8;
+    // required uint64 ageOfLastAppliedOp = 1;
     /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
-    boolean hasReportEndTime();
+    boolean hasAgeOfLastAppliedOp();
     /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
-    long getReportEndTime();
+    long getAgeOfLastAppliedOp();
 
-    // optional uint32 info_server_port = 9;
+    // required uint64 timeStampsOfLastAppliedOp = 2;
     /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosing an info server on.
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
-    boolean hasInfoServerPort();
+    boolean hasTimeStampsOfLastAppliedOp();
     /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosing an info server on.
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
-    int getInfoServerPort();
+    long getTimeStampsOfLastAppliedOp();
   }
   /**
-   * Protobuf type {@code ServerLoad}
+   * Protobuf type {@code ReplicationLoadSink}
    */
-  public static final class ServerLoad extends
+  public static final class ReplicationLoadSink extends
       com.google.protobuf.GeneratedMessage
-      implements ServerLoadOrBuilder {
-    // Use ServerLoad.newBuilder() to construct.
-    private ServerLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements ReplicationLoadSinkOrBuilder {
+    // Use ReplicationLoadSink.newBuilder() to construct.
+    private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final ServerLoad defaultInstance;
-    public static ServerLoad getDefaultInstance() {
+    private static final ReplicationLoadSink defaultInstance;
+    public static ReplicationLoadSink getDefaultInstance() {
       return defaultInstance;
     }
 
-    public ServerLoad getDefaultInstanceForType() {
+    public ReplicationLoadSink getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -4714,7 +4489,7 @@ public final class ClusterStatusProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private ServerLoad(
+    private ReplicationLoadSink(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -4739,53 +4514,12 @@ public final class ClusterStatusProtos {
             }
             case 8: {
               bitField0_ |= 0x00000001;
-              numberOfRequests_ = input.readUInt32();
+              ageOfLastAppliedOp_ = input.readUInt64();
               break;
             }
             case 16: {
               bitField0_ |= 0x00000002;
-              totalNumberOfRequests_ = input.readUInt32();
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              usedHeapMB_ = input.readUInt32();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000008;
-              maxHeapMB_ = input.readUInt32();
-              break;
-            }
-            case 42: {
-              if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
-                regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>();
-                mutable_bitField0_ |= 0x00000010;
-              }
-              regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry));
-              break;
-            }
-            case 50: {
-              if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
-                coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
-                mutable_bitField0_ |= 0x00000020;
-              }
-              coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
-              break;
-            }
-            case 56: {
-              bitField0_ |= 0x00000010;
-              reportStartTime_ = input.readUInt64();
-              break;
-            }
-            case 64: {
-              bitField0_ |= 0x00000020;
-              reportEndTime_ = input.readUInt64();
-              break;
-            }
-            case 72: {
-              bitField0_ |= 0x00000040;
-              infoServerPort_ = input.readUInt32();
+              timeStampsOfLastAppliedOp_ = input.readUInt64();
               break;
             }
           }
         }
       }
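[For illustration only — a minimal sketch, not part of the patch, exercising
the generated ReplicationLoadSink API shown in the hunks around this point
(newBuilder, the two required-field setters, build, and parseFrom all appear
in this diff; the values below are made up). Because both fields are declared
'required', build() throws if either setter is skipped:

  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink;

  public class ReplicationLoadSinkExample {
    public static void main(String[] args) throws Exception {
      // Build a sink-side replication metrics sample.
      ReplicationLoadSink sink = ReplicationLoadSink.newBuilder()
          .setAgeOfLastAppliedOp(1500L)                 // age of the last applied op, in ms
          .setTimeStampsOfLastAppliedOp(1423782615000L) // wall-clock ms of the last applied op
          .build();
      // Round-trip through the wire format handled by PARSER / writeTo().
      byte[] bytes = sink.toByteArray();
      ReplicationLoadSink copy = ReplicationLoadSink.parseFrom(bytes);
      assert copy.getAgeOfLastAppliedOp() == 1500L;
      assert copy.getTimeStampsOfLastAppliedOp() == 1423782615000L;
    }
  }
]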
@@ -4796,1957 +4530,4480 @@ public final class ClusterStatusProtos {
         throw new com.google.protobuf.InvalidProtocolBufferException(
             e.getMessage()).setUnfinishedMessage(this);
       } finally {
-        if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
-          regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
-        }
-        if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
-          coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
-        }
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
       }
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<ServerLoad> PARSER =
-        new com.google.protobuf.AbstractParser<ServerLoad>() {
-      public ServerLoad parsePartialFrom(
+    public static com.google.protobuf.Parser<ReplicationLoadSink> PARSER =
+        new com.google.protobuf.AbstractParser<ReplicationLoadSink>() {
+      public ReplicationLoadSink parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ServerLoad(input, extensionRegistry);
+        return new ReplicationLoadSink(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<ServerLoad> getParserForType() {
+    public com.google.protobuf.Parser<ReplicationLoadSink> getParserForType() {
       return PARSER;
     }
 
     private int bitField0_;
-    // optional uint32 number_of_requests = 1;
-    public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1;
-    private int numberOfRequests_;
+    // required uint64 ageOfLastAppliedOp = 1;
+    public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1;
+    private long ageOfLastAppliedOp_;
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
-    public boolean hasNumberOfRequests() {
+    public boolean hasAgeOfLastAppliedOp() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
     */
-    public int getNumberOfRequests() {
-      return numberOfRequests_;
+    public long getAgeOfLastAppliedOp() {
+      return ageOfLastAppliedOp_;
     }
 
-    // optional uint32 total_number_of_requests = 2;
-    public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2;
-    private int totalNumberOfRequests_;
+    // required uint64 timeStampsOfLastAppliedOp = 2;
+    public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2;
+    private long timeStampsOfLastAppliedOp_;
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
-    public boolean hasTotalNumberOfRequests() {
+    public boolean hasTimeStampsOfLastAppliedOp() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
     */
-    public int getTotalNumberOfRequests() {
-      return totalNumberOfRequests_;
+    public long getTimeStampsOfLastAppliedOp() {
+      return timeStampsOfLastAppliedOp_;
    }
 
-    // optional uint32 used_heap_MB = 3;
-    public static final int USED_HEAP_MB_FIELD_NUMBER = 3;
-    private int usedHeapMB_;
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
- */ - public boolean hasUsedHeapMB() { - return ((bitField0_ & 0x00000004) == 0x00000004); + private void initFields() { + ageOfLastAppliedOp_ = 0L; + timeStampsOfLastAppliedOp_ = 0L; } - /** - * optional uint32 used_heap_MB = 3; - * - *
-     ** the amount of used heap, in MB. 
-     * </pre>
- */ - public int getUsedHeapMB() { - return usedHeapMB_; + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAgeOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; } - // optional uint32 max_heap_MB = 4; - public static final int MAX_HEAP_MB_FIELD_NUMBER = 4; - private int maxHeapMB_; - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
- */ - public boolean hasMaxHeapMB() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timeStampsOfLastAppliedOp_); + } + getUnknownFields().writeTo(output); } - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
- */ - public int getMaxHeapMB() { - return maxHeapMB_; + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timeStampsOfLastAppliedOp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; } - // repeated .RegionLoad region_loads = 5; - public static final int REGION_LOADS_FIELD_NUMBER = 5; - private java.util.List regionLoads_; - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * </pre>
- */ - public java.util.List getRegionLoadsList() { - return regionLoads_; + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * </pre>
- */ - public java.util.List - getRegionLoadsOrBuilderList() { - return regionLoads_; + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj; + + boolean result = true; + result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp()); + if (hasAgeOfLastAppliedOp()) { + result = result && (getAgeOfLastAppliedOp() + == other.getAgeOfLastAppliedOp()); + } + result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp()); + if (hasTimeStampsOfLastAppliedOp()) { + result = result && (getTimeStampsOfLastAppliedOp() + == other.getTimeStampsOfLastAppliedOp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * </pre>
- */ - public int getRegionLoadsCount() { - return regionLoads_.size(); + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAgeOfLastAppliedOp()) { + hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp()); + } + if (hasTimeStampsOfLastAppliedOp()) { + hash = (37 * hash) + TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { - return regionLoads_.get(index); + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( - int index) { - return regionLoads_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - - // repeated .Coprocessor coprocessors = 6; - public static final int COPROCESSORS_FIELD_NUMBER = 6; - private java.util.List coprocessors_; - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
- */ - public java.util.List getCoprocessorsList() { - return coprocessors_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
- */ - public java.util.List - getCoprocessorsOrBuilderList() { - return coprocessors_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
- */ - public int getCoprocessorsCount() { - return coprocessors_.size(); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { - return coprocessors_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( - int index) { - return coprocessors_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); } - - // optional uint64 report_start_time = 7; - public static final int REPORT_START_TIME_FIELD_NUMBER = 7; - private long reportStartTime_; - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
- */ - public boolean hasReportStartTime() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); } - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
- */ - public long getReportStartTime() { - return reportStartTime_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); } - - // optional uint64 report_end_time = 8; - public static final int REPORT_END_TIME_FIELD_NUMBER = 8; - private long reportEndTime_; - /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
- */ - public boolean hasReportEndTime() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); } - /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
- */ - public long getReportEndTime() { - return reportEndTime_; + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) { + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } - // optional uint32 info_server_port = 9; - public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9; - private int infoServerPort_; - /** - * optional uint32 info_server_port = 9; - * - *
-     **
-     * The port number that this region server is hosing an info server on.
-     * </pre>
- */ - public boolean hasInfoServerPort() { - return ((bitField0_ & 0x00000040) == 0x00000040); + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } /** - * optional uint32 info_server_port = 9; - * - *
-     **
-     * The port number that this region server is hosing an info server on.
-     * </pre>
+ * Protobuf type {@code ReplicationLoadSink} */ - public int getInfoServerPort() { - return infoServerPort_; - } - - private void initFields() { - numberOfRequests_ = 0; - totalNumberOfRequests_ = 0; - usedHeapMB_ = 0; - maxHeapMB_ = 0; - regionLoads_ = java.util.Collections.emptyList(); - coprocessors_ = java.util.Collections.emptyList(); - reportStartTime_ = 0L; - reportEndTime_ = 0L; - infoServerPort_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getRegionLoadsCount(); i++) { - if (!getRegionLoads(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; } - for (int i = 0; i < getCoprocessorsCount(); i++) { - if (!getCoprocessors(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); } - memoizedIsInitialized = 1; - return true; - } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, numberOfRequests_); + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, totalNumberOfRequests_); + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, usedHeapMB_); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(4, maxHeapMB_); + private static Builder create() { + return new Builder(); } - for (int i = 0; i < regionLoads_.size(); i++) { - output.writeMessage(5, regionLoads_.get(i)); + + public Builder clear() { + super.clear(); + ageOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + timeStampsOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; } - for (int i = 0; i < coprocessors_.size(); i++) { - output.writeMessage(6, coprocessors_.get(i)); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(7, reportStartTime_); + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(8, reportEndTime_); + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt32(9, infoServerPort_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, numberOfRequests_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, totalNumberOfRequests_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, usedHeapMB_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, maxHeapMB_); - } - for (int i = 0; i < regionLoads_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, regionLoads_.get(i)); - } - for (int i = 0; i < coprocessors_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, coprocessors_.get(i)); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, reportStartTime_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, reportEndTime_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(9, infoServerPort_); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) { - return super.equals(obj); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timeStampsOfLastAppliedOp_ = 
timeStampsOfLastAppliedOp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj; - boolean result = true; - result = result && (hasNumberOfRequests() == other.hasNumberOfRequests()); - if (hasNumberOfRequests()) { - result = result && (getNumberOfRequests() - == other.getNumberOfRequests()); - } - result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests()); - if (hasTotalNumberOfRequests()) { - result = result && (getTotalNumberOfRequests() - == other.getTotalNumberOfRequests()); - } - result = result && (hasUsedHeapMB() == other.hasUsedHeapMB()); - if (hasUsedHeapMB()) { - result = result && (getUsedHeapMB() - == other.getUsedHeapMB()); - } - result = result && (hasMaxHeapMB() == other.hasMaxHeapMB()); - if (hasMaxHeapMB()) { - result = result && (getMaxHeapMB() - == other.getMaxHeapMB()); - } - result = result && getRegionLoadsList() - .equals(other.getRegionLoadsList()); - result = result && getCoprocessorsList() - .equals(other.getCoprocessorsList()); - result = result && (hasReportStartTime() == other.hasReportStartTime()); - if (hasReportStartTime()) { - result = result && (getReportStartTime() - == other.getReportStartTime()); - } - result = result && (hasReportEndTime() == other.hasReportEndTime()); - if (hasReportEndTime()) { - result = result && (getReportEndTime() - == other.getReportEndTime()); + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other); + } else { + super.mergeFrom(other); + return this; + } } - result = result && (hasInfoServerPort() == other.hasInfoServerPort()); - if (hasInfoServerPort()) { - result = result && (getInfoServerPort() - == other.getInfoServerPort()); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this; + if (other.hasAgeOfLastAppliedOp()) { + setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp()); + } + if (other.hasTimeStampsOfLastAppliedOp()) { + setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + public final boolean isInitialized() { + if (!hasAgeOfLastAppliedOp()) { + + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + + return false; + } + return true; } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasNumberOfRequests()) { - hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER; - hash = (53 * hash) + getNumberOfRequests(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } - if (hasTotalNumberOfRequests()) { - hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER; - hash = (53 * hash) + getTotalNumberOfRequests(); + private int bitField0_; + + // required uint64 ageOfLastAppliedOp = 1; + private long ageOfLastAppliedOp_ ; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - if (hasUsedHeapMB()) { - hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER; - hash = (53 * hash) + getUsedHeapMB(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; } - if (hasMaxHeapMB()) { - hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER; - hash = (53 * hash) + getMaxHeapMB(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder setAgeOfLastAppliedOp(long value) { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = value; + onChanged(); + return this; } - if (getRegionLoadsCount() > 0) { - hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; - hash = (53 * hash) + getRegionLoadsList().hashCode(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder clearAgeOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastAppliedOp_ = 0L; + onChanged(); + return this; } - if (getCoprocessorsCount() > 0) { - hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; - hash = (53 * hash) + getCoprocessorsList().hashCode(); + + // required uint64 timeStampsOfLastAppliedOp = 2; + private long timeStampsOfLastAppliedOp_ ; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - if (hasReportStartTime()) { - hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getReportStartTime()); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; } - if (hasReportEndTime()) { - hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getReportEndTime()); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder setTimeStampsOfLastAppliedOp(long value) { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = value; + onChanged(); + return this; } - if (hasInfoServerPort()) { - hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER; - hash = (53 * hash) + getInfoServerPort(); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder clearTimeStampsOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + timeStampsOfLastAppliedOp_ = 0L; + onChanged(); + return this; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.ByteString data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); + // @@protoc_insertion_point(builder_scope:ReplicationLoadSink) } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + static { + defaultInstance = new ReplicationLoadSink(true); + defaultInstance.initFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + + // @@protoc_insertion_point(class_scope:ReplicationLoadSink) + } + + public interface ReplicationLoadSourceOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string peerID = 1; + /** + * required string peerID = 1; + */ + boolean hasPeerID(); + /** + * required string peerID = 1; + */ + java.lang.String getPeerID(); + /** + * required string peerID = 1; + */ + com.google.protobuf.ByteString + getPeerIDBytes(); + + // required uint64 ageOfLastShippedOp = 2; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + boolean hasAgeOfLastShippedOp(); + /** + * required uint64 ageOfLastShippedOp = 2; + */ + long getAgeOfLastShippedOp(); + + // required uint32 sizeOfLogQueue = 3; + /** + * required uint32 sizeOfLogQueue = 3; + */ + boolean hasSizeOfLogQueue(); + /** + * required uint32 sizeOfLogQueue = 3; + */ + int getSizeOfLogQueue(); + + // required uint64 timeStampOfLastShippedOp = 4; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + boolean hasTimeStampOfLastShippedOp(); + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + long getTimeStampOfLastShippedOp(); + + // required uint64 replicationLag = 5; + /** + * required uint64 replicationLag = 5; + */ + boolean hasReplicationLag(); + /** + * required uint64 replicationLag = 5; + */ + long getReplicationLag(); + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class ReplicationLoadSource extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSourceOrBuilder { + // Use ReplicationLoadSource.newBuilder() to construct. 
+ private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); + private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSource defaultInstance; + public static ReplicationLoadSource getDefaultInstance() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); + + public ReplicationLoadSource getDefaultInstanceForType() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + private ReplicationLoadSource( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + peerID_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = input.readUInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + replicationLag_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; } - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) { - return 
newBuilder().mergeFrom(prototype); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); } - public Builder toBuilder() { return newBuilder(this); } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSource(input, extensionRegistry); + } + }; @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + public com.google.protobuf.Parser getParserForType() { + return PARSER; } + + private int bitField0_; + // required string peerID = 1; + public static final int PEERID_FIELD_NUMBER = 1; + private java.lang.Object peerID_; /** - * Protobuf type {@code ServerLoad} + * required string peerID = 1; */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + peerID_ = s; + } + return s; } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class); + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } + // required uint64 ageOfLastShippedOp = 2; + public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2; + private long ageOfLastShippedOp_; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 
0x00000002);
+    }
+    /**
+     * required uint64 ageOfLastShippedOp = 2;
+     */
+    public long getAgeOfLastShippedOp() {
+      return ageOfLastShippedOp_;
+    }
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getRegionLoadsFieldBuilder();
-          getCoprocessorsFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
+    // required uint32 sizeOfLogQueue = 3;
+    public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3;
+    private int sizeOfLogQueue_;
+    /**
+     * required uint32 sizeOfLogQueue = 3;
+     */
+    public boolean hasSizeOfLogQueue() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * required uint32 sizeOfLogQueue = 3;
+     */
+    public int getSizeOfLogQueue() {
+      return sizeOfLogQueue_;
+    }
-      public Builder clear() {
-        super.clear();
-        numberOfRequests_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        totalNumberOfRequests_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000002);
-        usedHeapMB_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        maxHeapMB_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        if (regionLoadsBuilder_ == null) {
-          regionLoads_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000010);
-        } else {
-          regionLoadsBuilder_.clear();
-        }
-        if (coprocessorsBuilder_ == null) {
-          coprocessors_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000020);
-        } else {
-          coprocessorsBuilder_.clear();
-        }
-        reportStartTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000040);
-        reportEndTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000080);
-        infoServerPort_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000100);
-        return this;
-      }
+    // required uint64 timeStampOfLastShippedOp = 4;
+    public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4;
+    private long timeStampOfLastShippedOp_;
+    /**
+     * required uint64 timeStampOfLastShippedOp = 4;
+     */
+    public boolean hasTimeStampOfLastShippedOp() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * required uint64 timeStampOfLastShippedOp = 4;
+     */
+    public long getTimeStampOfLastShippedOp() {
+      return timeStampOfLastShippedOp_;
+    }
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
+    // required uint64 replicationLag = 5;
+    public static final int REPLICATIONLAG_FIELD_NUMBER = 5;
+    private long replicationLag_;
+    /**
+     * required uint64 replicationLag = 5;
+     */
+    public boolean hasReplicationLag() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * required uint64 replicationLag = 5;
+     */
+    public long getReplicationLag() {
+      return replicationLag_;
+    }
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
-      }
+    private void initFields() {
+      peerID_ = "";
+      ageOfLastShippedOp_ = 0L;
+      sizeOfLogQueue_ = 0;
+      timeStampOfLastShippedOp_ = 0L;
+      replicationLag_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
+      if (!hasPeerID()) {
+        memoizedIsInitialized = 0;
+        return false;
      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() {
-        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial();
+      if (!hasAgeOfLastShippedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSizeOfLogQueue()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTimeStampOfLastShippedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasReplicationLag()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getPeerIDBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, ageOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt32(3, sizeOfLogQueue_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeUInt64(4, timeStampOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeUInt64(5, replicationLag_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getPeerIDBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, ageOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(3, sizeOfLogQueue_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(4, timeStampOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(5, replicationLag_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj;
+
+      boolean result = true;
+      result = result && (hasPeerID() == other.hasPeerID());
+      if (hasPeerID()) {
+        result = result && getPeerID()
+            .equals(other.getPeerID());
+      }
+      result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp());
+      if (hasAgeOfLastShippedOp()) {
+        result = result && (getAgeOfLastShippedOp()
+            == other.getAgeOfLastShippedOp());
+      }
+      result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue());
+      if (hasSizeOfLogQueue()) {
+        result = result && (getSizeOfLogQueue()
+            == other.getSizeOfLogQueue());
+      }
+      result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp());
+      if (hasTimeStampOfLastShippedOp()) {
+        result = result && (getTimeStampOfLastShippedOp()
+            == other.getTimeStampOfLastShippedOp());
+      }
+      result = result && (hasReplicationLag() == other.hasReplicationLag());
+      if (hasReplicationLag()) {
+        result = result && (getReplicationLag()
+            == other.getReplicationLag());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerID()) {
+        hash = (37 * hash) + PEERID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerID().hashCode();
+      }
+      if (hasAgeOfLastShippedOp()) {
+        hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getAgeOfLastShippedOp());
+      }
+      if (hasSizeOfLogQueue()) {
+        hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER;
+        hash = (53 * hash) + getSizeOfLogQueue();
+      }
+      if (hasTimeStampOfLastShippedOp()) {
+        hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp());
+      }
+      if (hasReplicationLag()) {
+        hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getReplicationLag());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code ReplicationLoadSource}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder
+       implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        peerID_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        ageOfLastShippedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        sizeOfLogQueue_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        timeStampOfLastShippedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        replicationLag_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000010);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this);
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
-        result.numberOfRequests_ = numberOfRequests_;
+        result.peerID_ = peerID_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
-        result.totalNumberOfRequests_ = totalNumberOfRequests_;
+        result.ageOfLastShippedOp_ = ageOfLastShippedOp_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
-        result.usedHeapMB_ = usedHeapMB_;
+        result.sizeOfLogQueue_ = sizeOfLogQueue_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
-        result.maxHeapMB_ = maxHeapMB_;
+        result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.replicationLag_ = replicationLag_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this;
+        if (other.hasPeerID()) {
+          bitField0_ |= 0x00000001;
+          peerID_ = other.peerID_;
+          onChanged();
+        }
+        if (other.hasAgeOfLastShippedOp()) {
+          setAgeOfLastShippedOp(other.getAgeOfLastShippedOp());
+        }
+        if (other.hasSizeOfLogQueue()) {
+          setSizeOfLogQueue(other.getSizeOfLogQueue());
+        }
+        if (other.hasTimeStampOfLastShippedOp()) {
+          setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp());
+        }
+        if (other.hasReplicationLag()) {
+          setReplicationLag(other.getReplicationLag());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerID()) {
+
+          return false;
+        }
+        if (!hasAgeOfLastShippedOp()) {
+
+          return false;
+        }
+        if (!hasSizeOfLogQueue()) {
+
+          return false;
+        }
+        if (!hasTimeStampOfLastShippedOp()) {
+
+          return false;
+        }
+        if (!hasReplicationLag()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string peerID = 1;
+      private java.lang.Object peerID_ = "";
+      /**
+       * required string peerID = 1;
+       */
+      public boolean hasPeerID() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required string peerID = 1;
+       */
+      public java.lang.String getPeerID() {
+        java.lang.Object ref = peerID_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          peerID_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * required string peerID = 1;
+       */
+      public com.google.protobuf.ByteString
+          getPeerIDBytes() {
+        java.lang.Object ref = peerID_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b =
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerID_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * required string peerID = 1;
+       */
+      public Builder setPeerID(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerID_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required string peerID = 1;
+       */
+      public Builder clearPeerID() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerID_ = getDefaultInstance().getPeerID();
+        onChanged();
+        return this;
+      }
+      /**
+       * required string peerID = 1;
+       */
+      public Builder setPeerIDBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerID_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 ageOfLastShippedOp = 2;
+      private long ageOfLastShippedOp_ ;
+      /**
+       * required uint64 ageOfLastShippedOp = 2;
+       */
+      public boolean hasAgeOfLastShippedOp() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * required uint64 ageOfLastShippedOp = 2;
+       */
+      public long getAgeOfLastShippedOp() {
+        return ageOfLastShippedOp_;
+      }
+      /**
+       * required uint64 ageOfLastShippedOp = 2;
+       */
+      public Builder setAgeOfLastShippedOp(long value) {
+        bitField0_ |= 0x00000002;
+        ageOfLastShippedOp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required uint64 ageOfLastShippedOp = 2;
+       */
+      public Builder clearAgeOfLastShippedOp() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        ageOfLastShippedOp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required uint32 sizeOfLogQueue = 3;
+      private int sizeOfLogQueue_ ;
+      /**
+       * required uint32 sizeOfLogQueue = 3;
+       */
+      public boolean hasSizeOfLogQueue() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * required uint32 sizeOfLogQueue = 3;
+       */
+      public int getSizeOfLogQueue() {
+        return sizeOfLogQueue_;
+      }
+      /**
+       * required uint32 sizeOfLogQueue = 3;
+       */
+      public Builder setSizeOfLogQueue(int value) {
+        bitField0_ |= 0x00000004;
+        sizeOfLogQueue_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required uint32 sizeOfLogQueue = 3;
+       */
+      public Builder clearSizeOfLogQueue() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        sizeOfLogQueue_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 timeStampOfLastShippedOp = 4;
+      private long timeStampOfLastShippedOp_ ;
+      /**
+       * required uint64 timeStampOfLastShippedOp = 4;
+       */
+      public boolean hasTimeStampOfLastShippedOp() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * required uint64 timeStampOfLastShippedOp = 4;
+       */
+      public long getTimeStampOfLastShippedOp() {
+        return timeStampOfLastShippedOp_;
+      }
+      /**
+       * required uint64 timeStampOfLastShippedOp = 4;
+       */
+      public Builder setTimeStampOfLastShippedOp(long value) {
+        bitField0_ |= 0x00000008;
+        timeStampOfLastShippedOp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required uint64 timeStampOfLastShippedOp = 4;
+       */
+      public Builder clearTimeStampOfLastShippedOp() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        timeStampOfLastShippedOp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 replicationLag = 5;
+      private long replicationLag_ ;
+      /**
+       * required uint64 replicationLag = 5;
+       */
+      public boolean hasReplicationLag() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * required uint64 replicationLag = 5;
+       */
+      public long getReplicationLag() {
+        return replicationLag_;
+      }
+      /**
+       * required uint64 replicationLag = 5;
+       */
+      public Builder setReplicationLag(long value) {
+        bitField0_ |= 0x00000010;
+        replicationLag_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required uint64 replicationLag = 5;
+       */
+      public Builder clearReplicationLag() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        replicationLag_ = 0