Return-Path: X-Original-To: apmail-hbase-commits-archive@www.apache.org Delivered-To: apmail-hbase-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id F393D176CB for ; Thu, 5 Mar 2015 01:35:01 +0000 (UTC) Received: (qmail 26875 invoked by uid 500); 5 Mar 2015 01:35:01 -0000 Delivered-To: apmail-hbase-commits-archive@hbase.apache.org Received: (qmail 26781 invoked by uid 500); 5 Mar 2015 01:35:01 -0000 Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: dev@hbase.apache.org Delivered-To: mailing list commits@hbase.apache.org Received: (qmail 26603 invoked by uid 99); 5 Mar 2015 01:35:01 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 05 Mar 2015 01:35:01 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 7CB56E0833; Thu, 5 Mar 2015 01:35:01 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: stack@apache.org To: commits@hbase.apache.org Date: Thu, 05 Mar 2015 01:35:04 -0000 Message-Id: <893db71f69624299bfd0bbef3ed29f26@git.apache.org> In-Reply-To: References: X-Mailer: ASF-Git Admin Mailer Subject: [4/5] hbase git commit: HBASE-11544: [Ergonomics] hbase.client.scanner.caching is dogged and will try to return batch even if it means OOME http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index afd67a1..5bcba26 100644 --- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -4247,6 +4247,30 @@ public final class ClientProtos { * */ boolean getStale(); + + // optional bool partial = 5 [default = false]; + /** + * optional bool partial = 5 [default = false]; + * + *
+     * Whether or not the entire result could be returned. Results will be split when
+     * the RPC chunk size limit is reached. Partial results contain only a subset of the
+     * cells for a row and must be combined with a result containing the remaining cells
+     * to form a complete result
+     * 
+ */ + boolean hasPartial(); + /** + * optional bool partial = 5 [default = false]; + * + *
+     * Whether or not the entire result could be returned. Results will be split when
+     * the RPC chunk size limit is reached. Partial results contain only a subset of the
+     * cells for a row and must be combined with a result containing the remaining cells
+     * to form a complete result
+     * 
+ */ + boolean getPartial(); } /** * Protobuf type {@code Result} @@ -4322,6 +4346,11 @@ public final class ClientProtos { stale_ = input.readBool(); break; } + case 40: { + bitField0_ |= 0x00000008; + partial_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4510,11 +4539,42 @@ public final class ClientProtos { return stale_; } + // optional bool partial = 5 [default = false]; + public static final int PARTIAL_FIELD_NUMBER = 5; + private boolean partial_; + /** + * optional bool partial = 5 [default = false]; + * + *
+     * Whether or not the entire result could be returned. Results will be split when
+     * the RPC chunk size limit is reached. Partial results contain only a subset of the
+     * cells for a row and must be combined with a result containing the remaining cells
+     * to form a complete result
+     * 
+ */ + public boolean hasPartial() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool partial = 5 [default = false]; + * + *
+     * Whether or not the entire result could be returned. Results will be split when
+     * the RPC chunk size limit is reached. Partial results contain only a subset of the
+     * cells for a row and must be combined with a result containing the remaining cells
+     * to form a complete result
+     * 
+ */ + public boolean getPartial() { + return partial_; + } + private void initFields() { cell_ = java.util.Collections.emptyList(); associatedCellCount_ = 0; exists_ = false; stale_ = false; + partial_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4540,6 +4600,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(4, stale_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(5, partial_); + } getUnknownFields().writeTo(output); } @@ -4565,6 +4628,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(4, stale_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, partial_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4605,6 +4672,11 @@ public final class ClientProtos { result = result && (getStale() == other.getStale()); } + result = result && (hasPartial() == other.hasPartial()); + if (hasPartial()) { + result = result && (getPartial() + == other.getPartial()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -4634,6 +4706,10 @@ public final class ClientProtos { hash = (37 * hash) + STALE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getStale()); } + if (hasPartial()) { + hash = (37 * hash) + PARTIAL_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPartial()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -4756,6 +4832,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000004); stale_ = false; bitField0_ = (bitField0_ & ~0x00000008); + partial_ = false; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -4805,6 +4883,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00000004; } result.stale_ = stale_; + if (((from_bitField0_ & 
0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.partial_ = partial_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4856,6 +4938,9 @@ public final class ClientProtos { if (other.hasStale()) { setStale(other.getStale()); } + if (other.hasPartial()) { + setPartial(other.getPartial()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5384,6 +5469,67 @@ public final class ClientProtos { return this; } + // optional bool partial = 5 [default = false]; + private boolean partial_ ; + /** + * optional bool partial = 5 [default = false]; + * + *
+       * Whether or not the entire result could be returned. Results will be split when
+       * the RPC chunk size limit is reached. Partial results contain only a subset of the
+       * cells for a row and must be combined with a result containing the remaining cells
+       * to form a complete result
+       * 
+ */ + public boolean hasPartial() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool partial = 5 [default = false]; + * + *
+       * Whether or not the entire result could be returned. Results will be split when
+       * the RPC chunk size limit is reached. Partial results contain only a subset of the
+       * cells for a row and must be combined with a result containing the remaining cells
+       * to form a complete result
+       * 
+ */ + public boolean getPartial() { + return partial_; + } + /** + * optional bool partial = 5 [default = false]; + * + *
+       * Whether or not the entire result could be returned. Results will be split when
+       * the RPC chunk size limit is reached. Partial results contain only a subset of the
+       * cells for a row and must be combined with a result containing the remaining cells
+       * to form a complete result
+       * 
+ */ + public Builder setPartial(boolean value) { + bitField0_ |= 0x00000010; + partial_ = value; + onChanged(); + return this; + } + /** + * optional bool partial = 5 [default = false]; + * + *
+       * Whether or not the entire result could be returned. Results will be split when
+       * the RPC chunk size limit is reached. Partial results contain only a subset of the
+       * cells for a row and must be combined with a result containing the remaining cells
+       * to form a complete result
+       * 
+ */ + public Builder clearPartial() { + bitField0_ = (bitField0_ & ~0x00000010); + partial_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:Result) } @@ -16277,6 +16423,16 @@ public final class ClientProtos { * optional uint64 next_call_seq = 6; */ long getNextCallSeq(); + + // optional bool client_handles_partials = 7; + /** + * optional bool client_handles_partials = 7; + */ + boolean hasClientHandlesPartials(); + /** + * optional bool client_handles_partials = 7; + */ + boolean getClientHandlesPartials(); } /** * Protobuf type {@code ScanRequest} @@ -16388,6 +16544,11 @@ public final class ClientProtos { nextCallSeq_ = input.readUInt64(); break; } + case 56: { + bitField0_ |= 0x00000040; + clientHandlesPartials_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -16536,6 +16697,22 @@ public final class ClientProtos { return nextCallSeq_; } + // optional bool client_handles_partials = 7; + public static final int CLIENT_HANDLES_PARTIALS_FIELD_NUMBER = 7; + private boolean clientHandlesPartials_; + /** + * optional bool client_handles_partials = 7; + */ + public boolean hasClientHandlesPartials() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool client_handles_partials = 7; + */ + public boolean getClientHandlesPartials() { + return clientHandlesPartials_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); @@ -16543,6 +16720,7 @@ public final class ClientProtos { numberOfRows_ = 0; closeScanner_ = false; nextCallSeq_ = 0L; + clientHandlesPartials_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -16586,6 +16764,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, 
nextCallSeq_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBool(7, clientHandlesPartials_); + } getUnknownFields().writeTo(output); } @@ -16619,6 +16800,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, nextCallSeq_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, clientHandlesPartials_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -16672,6 +16857,11 @@ public final class ClientProtos { result = result && (getNextCallSeq() == other.getNextCallSeq()); } + result = result && (hasClientHandlesPartials() == other.hasClientHandlesPartials()); + if (hasClientHandlesPartials()) { + result = result && (getClientHandlesPartials() + == other.getClientHandlesPartials()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -16709,6 +16899,10 @@ public final class ClientProtos { hash = (37 * hash) + NEXT_CALL_SEQ_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNextCallSeq()); } + if (hasClientHandlesPartials()) { + hash = (37 * hash) + CLIENT_HANDLES_PARTIALS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getClientHandlesPartials()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -16853,6 +17047,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000010); nextCallSeq_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); + clientHandlesPartials_ = false; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -16913,6 +17109,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00000020; } result.nextCallSeq_ = nextCallSeq_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.clientHandlesPartials_ = clientHandlesPartials_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -16947,6 +17147,9 @@ 
public final class ClientProtos { if (other.hasNextCallSeq()) { setNextCallSeq(other.getNextCallSeq()); } + if (other.hasClientHandlesPartials()) { + setClientHandlesPartials(other.getClientHandlesPartials()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -17352,6 +17555,39 @@ public final class ClientProtos { return this; } + // optional bool client_handles_partials = 7; + private boolean clientHandlesPartials_ ; + /** + * optional bool client_handles_partials = 7; + */ + public boolean hasClientHandlesPartials() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool client_handles_partials = 7; + */ + public boolean getClientHandlesPartials() { + return clientHandlesPartials_; + } + /** + * optional bool client_handles_partials = 7; + */ + public Builder setClientHandlesPartials(boolean value) { + bitField0_ |= 0x00000040; + clientHandlesPartials_ = value; + onChanged(); + return this; + } + /** + * optional bool client_handles_partials = 7; + */ + public Builder clearClientHandlesPartials() { + bitField0_ = (bitField0_ & ~0x00000040); + clientHandlesPartials_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ScanRequest) } @@ -17504,6 +17740,50 @@ public final class ClientProtos { * optional bool stale = 6; */ boolean getStale(); + + // repeated bool partial_flag_per_result = 7; + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + java.util.List getPartialFlagPerResultList(); + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + int getPartialFlagPerResultCount(); + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + boolean getPartialFlagPerResult(int index); } /** * Protobuf type {@code ScanResponse} @@ -17611,6 +17891,27 @@ public final class ClientProtos { stale_ = input.readBool(); break; } + case 56: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + partialFlagPerResult_.add(input.readBool()); + break; + } + case 58: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) { + partialFlagPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + while (input.getBytesUntilLimit() > 0) { + partialFlagPerResult_.add(input.readBool()); + } + input.popLimit(limit); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17625,6 +17926,9 @@ public final class ClientProtos { if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { results_ = java.util.Collections.unmodifiableList(results_); } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = java.util.Collections.unmodifiableList(partialFlagPerResult_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -17840,6 +18144,59 @@ public final class ClientProtos { return stale_; } + // repeated bool partial_flag_per_result = 7; + public static final int PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER = 7; + private java.util.List partialFlagPerResult_; + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + public java.util.List + getPartialFlagPerResultList() { + return partialFlagPerResult_; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + public int getPartialFlagPerResultCount() { + return partialFlagPerResult_.size(); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+     * This field is filled in if we are doing cellblocks. In the event that a row
+     * could not fit all of its cells into a single RPC chunk, the results will be
+     * returned as partials, and reconstructed into a complete result on the client
+     * side. This field is a list of flags indicating whether or not the result
+     * that the cells belong to is a partial result. For example, if this field
+     * has false, false, true in it, then we know that on the client side, we need to
+     * make another RPC request since the last result was only a partial.
+     * 
+ */ + public boolean getPartialFlagPerResult(int index) { + return partialFlagPerResult_.get(index); + } + private void initFields() { cellsPerResult_ = java.util.Collections.emptyList(); scannerId_ = 0L; @@ -17847,6 +18204,7 @@ public final class ClientProtos { ttl_ = 0; results_ = java.util.Collections.emptyList(); stale_ = false; + partialFlagPerResult_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17878,6 +18236,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(6, stale_); } + for (int i = 0; i < partialFlagPerResult_.size(); i++) { + output.writeBool(7, partialFlagPerResult_.get(i)); + } getUnknownFields().writeTo(output); } @@ -17916,6 +18277,12 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(6, stale_); } + { + int dataSize = 0; + dataSize = 1 * getPartialFlagPerResultList().size(); + size += dataSize; + size += 1 * getPartialFlagPerResultList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17963,6 +18330,8 @@ public final class ClientProtos { result = result && (getStale() == other.getStale()); } + result = result && getPartialFlagPerResultList() + .equals(other.getPartialFlagPerResultList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -18000,6 +18369,10 @@ public final class ClientProtos { hash = (37 * hash) + STALE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getStale()); } + if (getPartialFlagPerResultCount() > 0) { + hash = (37 * hash) + PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getPartialFlagPerResultList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -18132,6 +18505,8 @@ public final class ClientProtos { } stale_ = false; bitField0_ = (bitField0_ & ~0x00000020); + 
partialFlagPerResult_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -18190,6 +18565,11 @@ public final class ClientProtos { to_bitField0_ |= 0x00000008; } result.stale_ = stale_; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = java.util.Collections.unmodifiableList(partialFlagPerResult_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.partialFlagPerResult_ = partialFlagPerResult_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18254,6 +18634,16 @@ public final class ClientProtos { if (other.hasStale()) { setStale(other.getStale()); } + if (!other.partialFlagPerResult_.isEmpty()) { + if (partialFlagPerResult_.isEmpty()) { + partialFlagPerResult_ = other.partialFlagPerResult_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.addAll(other.partialFlagPerResult_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18897,6 +19287,142 @@ public final class ClientProtos { return this; } + // repeated bool partial_flag_per_result = 7; + private java.util.List partialFlagPerResult_ = java.util.Collections.emptyList(); + private void ensurePartialFlagPerResultIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = new java.util.ArrayList(partialFlagPerResult_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public java.util.List + getPartialFlagPerResultList() { + return java.util.Collections.unmodifiableList(partialFlagPerResult_); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public int getPartialFlagPerResultCount() { + return partialFlagPerResult_.size(); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public boolean getPartialFlagPerResult(int index) { + return partialFlagPerResult_.get(index); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public Builder setPartialFlagPerResult( + int index, boolean value) { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.set(index, value); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public Builder addPartialFlagPerResult(boolean value) { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.add(value); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public Builder addAllPartialFlagPerResult( + java.lang.Iterable values) { + ensurePartialFlagPerResultIsMutable(); + super.addAll(values, partialFlagPerResult_); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
+       * This field is filled in if we are doing cellblocks. In the event that a row
+       * could not fit all of its cells into a single RPC chunk, the results will be
+       * returned as partials, and reconstructed into a complete result on the client
+       * side. This field is a list of flags indicating whether or not the result
+       * that the cells belong to is a partial result. For example, if this field
+       * has false, false, true in it, then we know that on the client side, we need to
+       * make another RPC request since the last result was only a partial.
+       * 
+ */ + public Builder clearPartialFlagPerResult() { + partialFlagPerResult_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ScanResponse) } @@ -31980,105 +32506,108 @@ public final class ClientProtos { "\024\n\014store_offset\030\t \001(\r\022\035\n\016existence_only\030" + "\n \001(\010:\005false\022!\n\022closest_row_before\030\013 \001(\010" + ":\005false\022)\n\013consistency\030\014 \001(\0162\014.Consisten" + - "cy:\006STRONG\"b\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cel" + + "cy:\006STRONG\"z\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cel" + "l\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exis" + - "ts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\"A\n\nGetReq" + - "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\021" + - "\n\003get\030\002 \002(\0132\004.Get\"&\n\013GetResponse\022\027\n\006resu" + - "lt\030\001 \001(\0132\007.Result\"\200\001\n\tCondition\022\013\n\003row\030\001" + - " \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022", - "\"\n\014compare_type\030\004 \002(\0162\014.CompareType\022\037\n\nc" + - "omparator\030\005 \002(\0132\013.Comparator\"\265\006\n\rMutatio" + - "nProto\022\013\n\003row\030\001 \001(\014\0220\n\013mutate_type\030\002 \001(\016" + - "2\033.MutationProto.MutationType\0220\n\014column_" + - "value\030\003 \003(\0132\032.MutationProto.ColumnValue\022" + - "\021\n\ttimestamp\030\004 \001(\004\022!\n\tattribute\030\005 \003(\0132\016." 
+ - "NameBytesPair\022:\n\ndurability\030\006 \001(\0162\031.Muta" + - "tionProto.Durability:\013USE_DEFAULT\022\036\n\ntim" + - "e_range\030\007 \001(\0132\n.TimeRange\022\035\n\025associated_" + - "cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\347\001\n\013Colu", - "mnValue\022\016\n\006family\030\001 \002(\014\022B\n\017qualifier_val" + - "ue\030\002 \003(\0132).MutationProto.ColumnValue.Qua" + - "lifierValue\032\203\001\n\016QualifierValue\022\021\n\tqualif" + - "ier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimestamp\030\003 " + - "\001(\004\022.\n\013delete_type\030\004 \001(\0162\031.MutationProto" + - ".DeleteType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022" + - "\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_" + - "WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mu" + - "tationType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n" + - "\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELE", - "TE_ONE_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERS" + - "IONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMI" + - "LY_VERSION\020\003\"\207\001\n\rMutateRequest\022 \n\006region" + - "\030\001 \002(\0132\020.RegionSpecifier\022 \n\010mutation\030\002 \002" + - "(\0132\016.MutationProto\022\035\n\tcondition\030\003 \001(\0132\n." + - "Condition\022\023\n\013nonce_group\030\004 \001(\004\"<\n\016Mutate" + - "Response\022\027\n\006result\030\001 \001(\0132\007.Result\022\021\n\tpro" + - "cessed\030\002 \001(\010\"\271\003\n\004Scan\022\027\n\006column\030\001 \003(\0132\007." 
+ - "Column\022!\n\tattribute\030\002 \003(\0132\016.NameBytesPai" + - "r\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022\027", - "\n\006filter\030\005 \001(\0132\007.Filter\022\036\n\ntime_range\030\006 " + - "\001(\0132\n.TimeRange\022\027\n\014max_versions\030\007 \001(\r:\0011" + - "\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nbatch_si" + - "ze\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022\023\n\013sto" + - "re_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036" + - "load_column_families_on_demand\030\r \001(\010\022\r\n\005" + - "small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005false\022)\n\013" + - "consistency\030\020 \001(\0162\014.Consistency:\006STRONG\022" + - "\017\n\007caching\030\021 \001(\r\"\236\001\n\013ScanRequest\022 \n\006regi" + - "on\030\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 \001(\013", - "2\005.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of" + - "_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rne" + - "xt_call_seq\030\006 \001(\004\"\210\001\n\014ScanResponse\022\030\n\020ce" + - "lls_per_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004" + - "\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007r" + - "esults\030\005 \003(\0132\007.Result\022\r\n\005stale\030\006 \001(\010\"\263\001\n" + - "\024BulkLoadHFileRequest\022 \n\006region\030\001 \002(\0132\020." 
+ - "RegionSpecifier\0225\n\013family_path\030\002 \003(\0132 .B" + - "ulkLoadHFileRequest.FamilyPath\022\026\n\016assign" + - "_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001", - " \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileRespo" + - "nse\022\016\n\006loaded\030\001 \002(\010\"a\n\026CoprocessorServic" + - "eCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t" + - "\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"9" + - "\n\030CoprocessorServiceResult\022\035\n\005value\030\001 \001(" + - "\0132\016.NameBytesPair\"d\n\031CoprocessorServiceR" + - "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" + - "\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCall\"" + - "]\n\032CoprocessorServiceResponse\022 \n\006region\030" + - "\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(\0132\016", - ".NameBytesPair\"{\n\006Action\022\r\n\005index\030\001 \001(\r\022" + - " \n\010mutation\030\002 \001(\0132\016.MutationProto\022\021\n\003get" + - "\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004 \001(\0132\027.Cop" + - "rocessorServiceCall\"Y\n\014RegionAction\022 \n\006r" + - "egion\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006atomic\030" + - "\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"D\n\017Region" + - "LoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rhe" + - "apOccupancy\030\002 \001(\005:\0010\"\266\001\n\021ResultOrExcepti" + - "on\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Resu" + - "lt\022!\n\texception\030\003 \001(\0132\016.NameBytesPair\0221\n", - "\016service_result\030\004 \001(\0132\031.CoprocessorServi" + - "ceResult\022#\n\tloadStats\030\005 \001(\0132\020.RegionLoad" + - "Stats\"f\n\022RegionActionResult\022-\n\021resultOrE" + - "xception\030\001 \003(\0132\022.ResultOrException\022!\n\tex" + - "ception\030\002 
\001(\0132\016.NameBytesPair\"f\n\014MultiRe" + - "quest\022#\n\014regionAction\030\001 \003(\0132\r.RegionActi" + - "on\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tcondition\030\003 \001(" + - "\0132\n.Condition\"S\n\rMultiResponse\022/\n\022region" + - "ActionResult\030\001 \003(\0132\023.RegionActionResult\022" + - "\021\n\tprocessed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STR", - "ONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\rClientService\022 \n" + - "\003Get\022\013.GetRequest\032\014.GetResponse\022)\n\006Mutat" + - "e\022\016.MutateRequest\032\017.MutateResponse\022#\n\004Sc" + - "an\022\014.ScanRequest\032\r.ScanResponse\022>\n\rBulkL" + - "oadHFile\022\025.BulkLoadHFileRequest\032\026.BulkLo" + - "adHFileResponse\022F\n\013ExecService\022\032.Coproce" + - "ssorServiceRequest\032\033.CoprocessorServiceR" + - "esponse\022R\n\027ExecRegionServerService\022\032.Cop" + - "rocessorServiceRequest\032\033.CoprocessorServ" + - "iceResponse\022&\n\005Multi\022\r.MultiRequest\032\016.Mu", - "ltiResponseBB\n*org.apache.hadoop.hbase.p" + - "rotobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" + "ts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007partia" + + "l\030\005 \001(\010:\005false\"A\n\nGetRequest\022 \n\006region\030\001" + + " \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Ge" + + "t\"&\n\013GetResponse\022\027\n\006result\030\001 \001(\0132\007.Resul" + + "t\"\200\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002", + " \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\"\n\014compare_type\030" + + "\004 \002(\0162\014.CompareType\022\037\n\ncomparator\030\005 \002(\0132" + + "\013.Comparator\"\265\006\n\rMutationProto\022\013\n\003row\030\001 " + + "\001(\014\0220\n\013mutate_type\030\002 \001(\0162\033.MutationProto" + + ".MutationType\0220\n\014column_value\030\003 \003(\0132\032.Mu" + + 
"tationProto.ColumnValue\022\021\n\ttimestamp\030\004 \001" + + "(\004\022!\n\tattribute\030\005 \003(\0132\016.NameBytesPair\022:\n" + + "\ndurability\030\006 \001(\0162\031.MutationProto.Durabi" + + "lity:\013USE_DEFAULT\022\036\n\ntime_range\030\007 \001(\0132\n." + + "TimeRange\022\035\n\025associated_cell_count\030\010 \001(\005", + "\022\r\n\005nonce\030\t \001(\004\032\347\001\n\013ColumnValue\022\016\n\006famil" + + "y\030\001 \002(\014\022B\n\017qualifier_value\030\002 \003(\0132).Mutat" + + "ionProto.ColumnValue.QualifierValue\032\203\001\n\016" + + "QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005val" + + "ue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\022.\n\013delete_ty" + + "pe\030\004 \001(\0162\031.MutationProto.DeleteType\022\014\n\004t" + + "ags\030\005 \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000" + + "\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WA" + + "L\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006AP" + + "PEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE", + "\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000" + + "\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE" + + "_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\207\001\n" + + "\rMutateRequest\022 \n\006region\030\001 \002(\0132\020.RegionS" + + "pecifier\022 \n\010mutation\030\002 \002(\0132\016.MutationPro" + + "to\022\035\n\tcondition\030\003 \001(\0132\n.Condition\022\023\n\013non" + + "ce_group\030\004 \001(\004\"<\n\016MutateResponse\022\027\n\006resu" + + "lt\030\001 \001(\0132\007.Result\022\021\n\tprocessed\030\002 \001(\010\"\271\003\n" + + "\004Scan\022\027\n\006column\030\001 \003(\0132\007.Column\022!\n\tattrib" + + "ute\030\002 \003(\0132\016.NameBytesPair\022\021\n\tstart_row\030\003", + " \001(\014\022\020\n\010stop_row\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007" + + 
".Filter\022\036\n\ntime_range\030\006 \001(\0132\n.TimeRange\022" + + "\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blocks" + + "\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_" + + "result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024" + + "\n\014store_offset\030\014 \001(\r\022&\n\036load_column_fami" + + "lies_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010r" + + "eversed\030\017 \001(\010:\005false\022)\n\013consistency\030\020 \001(" + + "\0162\014.Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r" + + "\"\277\001\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.Regio", + "nSpecifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\022\n\nscann" + + "er_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rc" + + "lose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(" + + "\004\022\037\n\027client_handles_partials\030\007 \001(\010\"\251\001\n\014S" + + "canResponse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n" + + "\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022" + + "\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Result\022\r" + + "\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result" + + "\030\007 \003(\010\"\263\001\n\024BulkLoadHFileRequest\022 \n\006regio" + + "n\030\001 \002(\0132\020.RegionSpecifier\0225\n\013family_path", + "\030\002 \003(\0132 .BulkLoadHFileRequest.FamilyPath" + + "\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016" + + "\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoad" + + "HFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Coproce" + + "ssorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_" + + "name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007reque" + + "st\030\004 
\002(\014\"9\n\030CoprocessorServiceResult\022\035\n\005" + + "value\030\001 \001(\0132\016.NameBytesPair\"d\n\031Coprocess" + + "orServiceRequest\022 \n\006region\030\001 \002(\0132\020.Regio" + + "nSpecifier\022%\n\004call\030\002 \002(\0132\027.CoprocessorSe", + "rviceCall\"]\n\032CoprocessorServiceResponse\022" + + " \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005val" + + "ue\030\002 \002(\0132\016.NameBytesPair\"{\n\006Action\022\r\n\005in" + + "dex\030\001 \001(\r\022 \n\010mutation\030\002 \001(\0132\016.MutationPr" + + "oto\022\021\n\003get\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004" + + " \001(\0132\027.CoprocessorServiceCall\"Y\n\014RegionA" + + "ction\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" + + "\016\n\006atomic\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action" + + "\"D\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(" + + "\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\"\266\001\n\021Resul", + "tOrException\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 " + + "\001(\0132\007.Result\022!\n\texception\030\003 \001(\0132\016.NameBy" + + "tesPair\0221\n\016service_result\030\004 \001(\0132\031.Coproc" + + "essorServiceResult\022#\n\tloadStats\030\005 \001(\0132\020." + + "RegionLoadStats\"f\n\022RegionActionResult\022-\n" + + "\021resultOrException\030\001 \003(\0132\022.ResultOrExcep" + + "tion\022!\n\texception\030\002 \001(\0132\016.NameBytesPair\"" + + "f\n\014MultiRequest\022#\n\014regionAction\030\001 \003(\0132\r." 
+ + "RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tcond" + + "ition\030\003 \001(\0132\n.Condition\"S\n\rMultiResponse", + "\022/\n\022regionActionResult\030\001 \003(\0132\023.RegionAct" + + "ionResult\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Consiste" + + "ncy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\rClient" + + "Service\022 \n\003Get\022\013.GetRequest\032\014.GetRespons" + + "e\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateResp" + + "onse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespons" + + "e\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileReques" + + "t\032\026.BulkLoadHFileResponse\022F\n\013ExecService" + + "\022\032.CoprocessorServiceRequest\032\033.Coprocess" + + "orServiceResponse\022R\n\027ExecRegionServerSer", + "vice\022\032.CoprocessorServiceRequest\032\033.Copro" + + "cessorServiceResponse\022&\n\005Multi\022\r.MultiRe" + + "quest\032\016.MultiResponseBB\n*org.apache.hado" + + "op.hbase.protobuf.generatedB\014ClientProto" + + "sH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -32114,7 +32643,7 @@ public final class ClientProtos { internal_static_Result_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Result_descriptor, - new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", "Stale", }); + new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", "Stale", "Partial", }); internal_static_GetRequest_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_GetRequest_fieldAccessorTable = new @@ -32174,13 +32703,13 @@ public final class ClientProtos { internal_static_ScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanRequest_descriptor, - new java.lang.String[] { "Region", "Scan", "ScannerId", 
"NumberOfRows", "CloseScanner", "NextCallSeq", }); + new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", }); internal_static_ScanResponse_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_ScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanResponse_descriptor, - new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", }); + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", }); internal_static_BulkLoadHFileRequest_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_BulkLoadHFileRequest_fieldAccessorTable = new http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-protocol/src/main/protobuf/Client.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 606ca8d..5142e53 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -105,6 +105,12 @@ message Result { // Whether or not the results are coming from possibly stale data optional bool stale = 4 [default = false]; + + // Whether or not the entire result could be returned. Results will be split when + // the RPC chunk size limit is reached. 
Partial results contain only a subset of the + // cells for a row and must be combined with a result containing the remaining cells + // to form a complete result + optional bool partial = 5 [default = false]; } /** @@ -268,6 +274,7 @@ message ScanRequest { optional uint32 number_of_rows = 4; optional bool close_scanner = 5; optional uint64 next_call_seq = 6; + optional bool client_handles_partials = 7; } /** @@ -283,6 +290,7 @@ message ScanResponse { // has 3, 3, 3 in it, then we know that on the client, we are to make // three Results each of three Cells each. repeated uint32 cells_per_result = 1; + optional uint64 scanner_id = 2; optional bool more_results = 3; optional uint32 ttl = 4; @@ -291,6 +299,15 @@ message ScanResponse { // be inside the pb'd Result) repeated Result results = 5; optional bool stale = 6; + + // This field is filled in if we are doing cellblocks. In the event that a row + // could not fit all of its cells into a single RPC chunk, the results will be + // returned as partials, and reconstructed into a complete result on the client + // side. This field is a list of flags indicating whether or not the result + // that the cells belong to is a partial result. For example, if this field + // has false, false, true in it, then we know that on the client side, we need to + // make another RPC request since the last result was only a partial. 
+ repeated bool partial_flag_per_result = 7; } /** http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 2bab21b..a80a07e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -72,7 +72,10 @@ public class ClientSideRegionScanner extends AbstractClientScanner { public Result next() throws IOException { values.clear(); - scanner.nextRaw(values, -1); // pass -1 as limit so that we see the whole row. + // negative values indicate no limits + final long remainingResultSize = -1; + final int batchLimit = -1; + scanner.nextRaw(values, batchLimit, remainingResultSize); if (values.isEmpty()) { //we are done return null; http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index baf2aa6..a371e3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -26,14 +26,14 @@ import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 1db5671..b5d16ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -26,10 +26,10 @@ import java.util.NavigableSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateReque import 
org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState; import com.google.protobuf.ByteString; import com.google.protobuf.Message; @@ -91,7 +92,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { // qualifier can be null. boolean hasMoreRows = false; do { - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); @@ -145,7 +146,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } boolean hasMoreRows = false; do { - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); @@ -199,7 +200,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { List results = new ArrayList(); boolean hasMoreRows = false; do { - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); @@ -253,7 +254,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { scanner = env.getRegion().getScanner(scan); boolean hasMoreRows = false; do { - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); if (results.size() > 0) { counter++; } @@ -312,7 +313,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { do { results.clear(); - hasMoreRows = scanner.next(results); + hasMoreRows = 
NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, @@ -373,7 +374,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { do { tempVal = null; - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, @@ -440,7 +441,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { do { tempVal = null; tempWeight = null; - hasMoreRows = scanner.next(results); + hasMoreRows = NextState.hasMoreValues(scanner.next(results)); int listSize = results.size(); for (int i = 0; i < listSize; i++) { Cell kv = results.get(i); http://git-wip-us.apache.org/repos/asf/hbase/blob/de9791e9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 78c4806..13569d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -138,10 +138,11 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.Stor import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry; import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController; +import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.ReplayHLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -5177,8 +5178,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * If the joined heap data gathering is interrupted due to scan limits, this will * contain the row for which we are populating the values.*/ protected Cell joinedContinuationRow = null; - // KeyValue indicating that limit is reached when scanning - private final KeyValue KV_LIMIT = new KeyValue(); protected final byte[] stopRow; private final FilterWrapper filter; private int batch; @@ -5263,6 +5262,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.readPt; } + @Override + public int getBatch() { + return this.batch; + } + /** * Reset both the filter and the old filter. * @@ -5275,14 +5279,20 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } @Override - public boolean next(List outResults) + public NextState next(List outResults) throws IOException { // apply the batching limit by default return next(outResults, batch); } @Override - public synchronized boolean next(List outResults, int limit) throws IOException { + public NextState next(List outResults, int limit) throws IOException { + return next(outResults, limit, -1); + } + + @Override + public synchronized NextState next(List outResults, int limit, long remainingResultSize) + throws IOException { if (this.filterClosed) { throw new UnknownScannerException("Scanner was closed (timed out?) " + "after we renewed it. 
Could be caused by a very slow scanner " + @@ -5291,77 +5301,157 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // startRegionOperation(Operation.SCAN); readRequestsCount.increment(); try { - return nextRaw(outResults, limit); + return nextRaw(outResults, limit, remainingResultSize); } finally { closeRegionOperation(Operation.SCAN); } } @Override - public boolean nextRaw(List outResults) - throws IOException { + public NextState nextRaw(List outResults) throws IOException { return nextRaw(outResults, batch); } @Override - public boolean nextRaw(List outResults, int limit) throws IOException { + public NextState nextRaw(List outResults, int limit) + throws IOException { + return nextRaw(outResults, limit, -1); + } + + @Override + public NextState nextRaw(List outResults, int batchLimit, long remainingResultSize) + throws IOException { if (storeHeap == null) { // scanner is closed throw new UnknownScannerException("Scanner was closed"); } - boolean returnResult; + NextState state; if (outResults.isEmpty()) { // Usually outResults is empty. This is true when next is called // to handle scan or get operation. - returnResult = nextInternal(outResults, limit); + state = nextInternal(outResults, batchLimit, remainingResultSize); } else { List tmpList = new ArrayList(); - returnResult = nextInternal(tmpList, limit); + state = nextInternal(tmpList, batchLimit, remainingResultSize); outResults.addAll(tmpList); } + // State should never be null, this is a precautionary measure + if (state == null) { + if (LOG.isTraceEnabled()) LOG.trace("State was null. 
Defaulting to no more values state"); + state = NextState.makeState(NextState.State.NO_MORE_VALUES); + } + resetFilters(); if (isFilterDoneInternal()) { - returnResult = false; + state = NextState.makeState(NextState.State.NO_MORE_VALUES, state.getResultSize()); } - return returnResult; + return state; } - private void populateFromJoinedHeap(List results, int limit) - throws IOException { + /** + * @return the state the joinedHeap returned on the call to + * {@link KeyValueHeap#next(List, int, long)} + */ + private NextState populateFromJoinedHeap(List results, int limit, long resultSize) + throws IOException { assert joinedContinuationRow != null; - Cell kv = populateResult(results, this.joinedHeap, limit, + NextState state = + populateResult(results, this.joinedHeap, limit, resultSize, joinedContinuationRow.getRowArray(), joinedContinuationRow.getRowOffset(), joinedContinuationRow.getRowLength()); - if (kv != KV_LIMIT) { + if (state != null && !state.batchLimitReached() && !state.sizeLimitReached()) { // We are done with this row, reset the continuation. joinedContinuationRow = null; } // As the data is obtained from two independent heaps, we need to // ensure that result list is sorted, because Result relies on that. Collections.sort(results, comparator); + return state; } /** - * Fetches records with currentRow into results list, until next row or limit (if not -1). + * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is + * reached, or remainingResultSize (if not -1) is reaced * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. - * @param limit Max amount of KVs to place in result list, -1 means no limit. + * @param remainingResultSize The remaining space within our result size limit. A negative value + * indicate no limit + * @param batchLimit Max amount of KVs to place in result list, -1 means no limit. * @param currentRow Byte array with key we are fetching. 
* @param offset offset for currentRow * @param length length for currentRow - * @return KV_LIMIT if limit reached, next KeyValue otherwise. + * @return state of last call to {@link KeyValueHeap#next()} */ - private Cell populateResult(List results, KeyValueHeap heap, int limit, - byte[] currentRow, int offset, short length) throws IOException { + private NextState populateResult(List results, KeyValueHeap heap, int batchLimit, + long remainingResultSize, byte[] currentRow, int offset, short length) throws IOException { Cell nextKv; + boolean moreCellsInRow = false; + long accumulatedResultSize = 0; + List tmpResults = new ArrayList(); do { - heap.next(results, limit - results.size()); - if (limit > 0 && results.size() == limit) { - return KV_LIMIT; + int remainingBatchLimit = batchLimit - results.size(); + NextState heapState = + heap.next(tmpResults, remainingBatchLimit, remainingResultSize - accumulatedResultSize); + results.addAll(tmpResults); + accumulatedResultSize += calculateResultSize(tmpResults, heapState); + tmpResults.clear(); + + if (batchLimit > 0 && results.size() == batchLimit) { + return NextState.makeState(NextState.State.BATCH_LIMIT_REACHED, accumulatedResultSize); } + nextKv = heap.peek(); - } while (nextKv != null && CellUtil.matchingRow(nextKv, currentRow, offset, length)); + moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length); + boolean sizeLimitReached = + remainingResultSize > 0 && accumulatedResultSize >= remainingResultSize; + if (moreCellsInRow && sizeLimitReached) { + return NextState.makeState(NextState.State.SIZE_LIMIT_REACHED, accumulatedResultSize); + } + } while (moreCellsInRow); - return nextKv; + if (nextKv != null) { + return NextState.makeState(NextState.State.MORE_VALUES, accumulatedResultSize); + } else { + return NextState.makeState(NextState.State.NO_MORE_VALUES, accumulatedResultSize); + } + } + + /** + * Based on the nextKv in the heap, and the current row, decide whether or not there are more + * cells to 
be read in the heap. If the row of the nextKv in the heap matches the current row + * then there are more cells to be read in the row. + * @param nextKv + * @param currentRow + * @param offset + * @param length + * @return true When there are more cells in the row to be read + */ + private boolean moreCellsInRow(final Cell nextKv, byte[] currentRow, int offset, + short length) { + return nextKv != null && CellUtil.matchingRow(nextKv, currentRow, offset, length); + } + + /** + * Calculates the size of the results. If the state of the scanner that these results came from + * indicates that an estimate of the result size has already been generated, we can skip the + * calculation and use that instead. + * @param results List of cells we want to calculate size of + * @param state The state returned from the scanner that generated these results + * @return aggregate size of results + */ + private long calculateResultSize(List results, NextState state) { + if (results == null || results.isEmpty()) return 0; + + // In general, the state should contain the estimate because the result size used to + // determine when the scan has exceeded its size limit. If the estimate is contained in the + // state then we can avoid an unnecesasry calculation. 
+ if (state != null && state.hasResultSizeEstimate()) return state.getResultSize(); + + long size = 0; + for (Cell c : results) { + size += CellUtil.estimatedHeapSizeOf(c); + } + + return size; } /* @@ -5376,11 +5466,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.filter != null && this.filter.filterAllRemaining(); } - private boolean nextInternal(List results, int limit) - throws IOException { + private NextState nextInternal(List results, int batchLimit, long remainingResultSize) + throws IOException { if (!results.isEmpty()) { throw new IllegalArgumentException("First parameter should be an empty list"); } + // Estimate of the size (heap size) of the results returned from this method + long resultSize = 0; RpcCallContext rpcCall = RpcServer.getCurrentCall(); // The loop here is used only when at some point during the next we determine // that due to effects of filters or otherwise, we have an empty row in the result. @@ -5413,38 +5505,74 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // offset = current.getRowOffset(); length = current.getRowLength(); } + boolean stopRow = isStopRow(currentRow, offset, length); + boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); + + // If filter#hasFilterRow is true, partial results are not allowed since allowing them + // would prevent the filters from being evaluated. Thus, if it is true, change the + // remainingResultSize to -1 so that the entire row's worth of cells are fetched. + if (hasFilterRow && remainingResultSize > 0) { + remainingResultSize = -1; + if (LOG.isTraceEnabled()) { + LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + + " formed. The remainingResultSize of: " + remainingResultSize + " will not " + + " be considered when fetching the cells for this row."); + } + } + + NextState joinedHeapState; // Check if we were getting data from the joinedHeap and hit the limit. 
// If not, then it's main path - getting results from storeHeap. if (joinedContinuationRow == null) { // First, check if we are at a stop row. If so, there are no more results. if (stopRow) { - if (filter != null && filter.hasFilterRow()) { + if (hasFilterRow) { filter.filterRowCells(results); } - return false; + return NextState.makeState(NextState.State.NO_MORE_VALUES, resultSize); } // Check if rowkey filter wants to exclude this row. If so, loop to next. // Technically, if we hit limits before on this row, we don't need this call. if (filterRowKey(currentRow, offset, length)) { boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return false; + if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, resultSize); results.clear(); continue; } - Cell nextKv = populateResult(results, this.storeHeap, limit, currentRow, offset, - length); + NextState storeHeapState = + populateResult(results, this.storeHeap, batchLimit, remainingResultSize, currentRow, + offset, length); + resultSize += calculateResultSize(results, storeHeapState); + // Invalid states should never be returned. If one is seen, throw exception + // since we have no way of telling how we should proceed + if (!NextState.isValidState(storeHeapState)) { + throw new IOException("NextState returned from call storeHeap was invalid"); + } + // Ok, we are good, let's try to get some results from the main heap. - if (nextKv == KV_LIMIT) { - if (this.filter != null && filter.hasFilterRow()) { + if (storeHeapState.batchLimitReached()) { + if (hasFilterRow) { throw new IncompatibleFilterException( "Filter whose hasFilterRow() returns true is incompatible with scan with limit!"); } - return true; // We hit the limit. + // We hit the batch limit. 
+ return NextState.makeState(NextState.State.BATCH_LIMIT_REACHED, resultSize); + } else if (storeHeapState.sizeLimitReached()) { + if (hasFilterRow) { + // We try to guard against this case above when remainingResultSize is set to -1 if + // hasFilterRow is true. In the event that the guard doesn't work, an exception must be + // thrown + throw new IncompatibleFilterException( + "Filter whose hasFilterRows() returns true is incompatible with scans that" + + " return partial results"); + } + // We hit the size limit. + return NextState.makeState(NextState.State.SIZE_LIMIT_REACHED, resultSize); } - + Cell nextKv = this.storeHeap.peek(); stopRow = nextKv == null || isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength()); // save that the row was empty before filters applied to it. @@ -5453,19 +5581,19 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // We have the part of the row necessary for filtering (all of it, usually). // First filter with the filterRow(List). FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; - if (filter != null && filter.hasFilterRow()) { + if (hasFilterRow) { ret = filter.filterRowCellsWithRet(results); } if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) { results.clear(); boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, 0); // This row was totally filtered out, if this is NOT the last row, // we should continue on. Otherwise, nothing else to do. if (!stopRow) continue; - return false; + return NextState.makeState(NextState.State.NO_MORE_VALUES, 0); } // Ok, we are done with storeHeap for this row. 
@@ -5483,18 +5611,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // currentRow, offset, length)); if (mayHaveData) { joinedContinuationRow = current; - populateFromJoinedHeap(results, limit); + joinedHeapState = + populateFromJoinedHeap(results, batchLimit, remainingResultSize - resultSize); + resultSize += + joinedHeapState != null && joinedHeapState.hasResultSizeEstimate() ? + joinedHeapState.getResultSize() : 0; + if (joinedHeapState != null && joinedHeapState.sizeLimitReached()) { + return NextState.makeState(NextState.State.SIZE_LIMIT_REACHED, resultSize); + } } } } else { // Populating from the joined heap was stopped by limits, populate some more. - populateFromJoinedHeap(results, limit); + joinedHeapState = + populateFromJoinedHeap(results, batchLimit, remainingResultSize - resultSize); + resultSize += + joinedHeapState != null && joinedHeapState.hasResultSizeEstimate() ? + joinedHeapState.getResultSize() : 0; + if (joinedHeapState != null && joinedHeapState.sizeLimitReached()) { + return NextState.makeState(NextState.State.SIZE_LIMIT_REACHED, resultSize); + } } - // We may have just called populateFromJoinedMap and hit the limits. If that is // the case, we need to call it again on the next next() invocation. if (joinedContinuationRow != null) { - return true; + return NextState.makeState(NextState.State.MORE_VALUES, resultSize); } // Finally, we are done with both joinedHeap and storeHeap. @@ -5502,12 +5643,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // the case when SingleColumnValueExcludeFilter is used. if (results.isEmpty()) { boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return false; + if (!moreRows) return NextState.makeState(NextState.State.NO_MORE_VALUES, 0); if (!stopRow) continue; } // We are done. Return the result. 
- return !stopRow; + if (stopRow) { + return NextState.makeState(NextState.State.NO_MORE_VALUES, resultSize); + } else { + return NextState.makeState(NextState.State.MORE_VALUES, resultSize); + } } } @@ -7141,7 +7286,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // boolean done; do { kvs.clear(); - done = scanner.next(kvs); + done = NextState.hasMoreValues(scanner.next(kvs)); if (kvs.size() > 0) LOG.info(kvs); } while (done); } finally {