hbase-commits mailing list archives

From st...@apache.org
Subject [5/5] hbase git commit: HBASE-11544: [Ergonomics] hbase.client.scanner.caching is dogged and will try to return batch even if it means OOME
Date Thu, 05 Mar 2015 02:04:29 GMT
HBASE-11544: [Ergonomics] hbase.client.scanner.caching is dogged and will try to return batch even if it means OOME

Signed-off-by: stack <stack@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c64a57e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c64a57e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c64a57e

Branch: refs/heads/branch-1
Commit: 0c64a57e529d591a96d56721a1ae538167a03a11
Parents: 1cdcb6e
Author: Jonathan Lawlor <jonathan.lawlor@cloudera.com>
Authored: Wed Feb 4 14:07:35 2015 -0800
Committer: stack <stack@apache.org>
Committed: Wed Mar 4 18:04:15 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/ClientScanner.java      | 227 +++++-
 .../org/apache/hadoop/hbase/client/Result.java  | 104 ++-
 .../hbase/client/ReversedClientScanner.java     |  33 +-
 .../org/apache/hadoop/hbase/client/Scan.java    |  34 +
 .../client/ScannerCallableWithReplicas.java     |   4 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |   5 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   3 +
 .../hbase/protobuf/ResponseConverter.java       |   5 +-
 .../org/apache/hadoop/hbase/HConstants.java     |   2 +-
 .../src/main/resources/hbase-default.xml        |  20 +-
 .../coprocessor/example/BulkDeleteEndpoint.java |   9 +-
 .../coprocessor/example/RowCountEndpoint.java   |   5 +-
 .../hbase/protobuf/generated/ClientProtos.java  | 731 ++++++++++++++---
 hbase-protocol/src/main/protobuf/Client.proto   |  17 +
 .../hbase/client/ClientSideRegionScanner.java   |   5 +-
 .../hbase/client/TableSnapshotScanner.java      |   4 +-
 .../coprocessor/AggregateImplementation.java    |  17 +-
 .../hadoop/hbase/regionserver/HRegion.java      | 243 ++++--
 .../hbase/regionserver/InternalScanner.java     | 205 ++++-
 .../hadoop/hbase/regionserver/KeyValueHeap.java |  39 +-
 .../hbase/regionserver/RSRpcServices.java       |  62 +-
 .../hbase/regionserver/RegionScanner.java       |  54 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java |   5 +-
 .../hadoop/hbase/regionserver/StoreScanner.java |  58 +-
 .../regionserver/compactions/Compactor.java     |   3 +-
 .../security/access/AccessControlLists.java     |   3 +-
 .../hbase/security/access/AccessController.java |   3 +-
 .../org/apache/hadoop/hbase/HBaseTestCase.java  |   3 +-
 .../apache/hadoop/hbase/TestAcidGuarantees.java |   1 -
 .../hbase/TestPartialResultsFromClientSide.java | 787 +++++++++++++++++++
 .../hbase/client/TestIntraRowPagination.java    |   3 +-
 .../coprocessor/ColumnAggregationEndpoint.java  |   4 +-
 .../ColumnAggregationEndpointNullResponse.java  |   3 +-
 .../ColumnAggregationEndpointWithErrors.java    |   3 +-
 .../coprocessor/TestCoprocessorInterface.java   |  27 +-
 .../TestRegionObserverInterface.java            |  19 +-
 .../hbase/filter/TestColumnPrefixFilter.java    |  18 +-
 .../hbase/filter/TestDependentColumnFilter.java |  20 +-
 .../apache/hadoop/hbase/filter/TestFilter.java  |  33 +-
 .../filter/TestInvocationRecordFilter.java      |   7 +-
 .../filter/TestMultipleColumnPrefixFilter.java  |  26 +-
 .../hbase/io/encoding/TestPrefixTree.java       |   9 +-
 .../TestScannerSelectionUsingKeyRange.java      |   5 +-
 .../io/hfile/TestScannerSelectionUsingTTL.java  |   7 +-
 .../hbase/regionserver/TestAtomicOperation.java |   7 +-
 .../hbase/regionserver/TestBlocksScanned.java   |   5 +-
 .../hbase/regionserver/TestColumnSeeking.java   |  19 +-
 .../hbase/regionserver/TestDefaultMemStore.java |  11 +-
 .../regionserver/TestGetClosestAtOrBefore.java  |   9 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  | 129 +--
 .../hbase/regionserver/TestKeepDeletes.java     |   7 +-
 .../hbase/regionserver/TestMajorCompaction.java |  13 +-
 .../regionserver/TestMultiColumnScanner.java    |   5 +-
 .../TestRegionMergeTransaction.java             |   7 +-
 .../regionserver/TestReversibleScanners.java    |   5 +-
 .../regionserver/TestScanWithBloomError.java    |   5 +-
 .../hadoop/hbase/regionserver/TestScanner.java  |  13 +-
 .../regionserver/TestSeekOptimizations.java     |   5 +-
 .../regionserver/TestSplitTransaction.java      |  12 +-
 .../hbase/regionserver/TestStoreScanner.java    |  55 +-
 .../hbase/regionserver/TestStripeCompactor.java |  38 +-
 .../hbase/regionserver/TestWideScanner.java     |   5 +-
 .../compactions/TestStripeCompactionPolicy.java |  21 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   9 +-
 .../apache/hadoop/hbase/util/TestMergeTool.java |   5 +-
 65 files changed, 2726 insertions(+), 534 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 11cb990..4394b19 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.commons.logging.Log;
@@ -45,8 +48,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosestRowBefore;
-
 /**
  * Implements the scanner interface for the HBase client.
  * If there are multiple regions in a table, this scanner will iterate
@@ -55,6 +56,9 @@ import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosest
 @InterfaceAudience.Private
 public class ClientScanner extends AbstractClientScanner {
     private final Log LOG = LogFactory.getLog(this.getClass());
+    // A byte array in which all elements are the max byte, and it is used to
+    // construct closest front row
+    static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);
     protected Scan scan;
     protected boolean closed = false;
     // Current region scanner is against.  Gets cleared if current region goes
@@ -62,6 +66,12 @@ public class ClientScanner extends AbstractClientScanner {
     protected HRegionInfo currentRegion = null;
     protected ScannerCallableWithReplicas callable = null;
     protected final LinkedList<Result> cache = new LinkedList<Result>();
+    /**
+     * A list of partial results that have been returned from the server. This list should only
+     * contain results if this scanner does not have enough partial results to form the complete
+     * result.
+     */
+    protected final LinkedList<Result> partialResults = new LinkedList<Result>();
     protected final int caching;
     protected long lastNext;
     // Keep lastResult returned successfully in case we have to reset scanner.
@@ -338,21 +348,23 @@ public class ClientScanner extends AbstractClientScanner {
         return null;
       }
       if (cache.size() == 0) {
-        Result [] values = null;
+        Result[] values = null;
         long remainingResultSize = maxScannerResultSize;
         int countdown = this.caching;
+
         // We need to reset it if it's a new callable that was created
         // with a countdown in nextScanner
         callable.setCaching(this.caching);
-        // This flag is set when we want to skip the result returned.  We do
+        // This flag is set when we want to skip the result returned. We do
         // this when we reset scanner because it split under us.
-        boolean retryAfterOutOfOrderException  = true;
+        boolean retryAfterOutOfOrderException = true;
         do {
           try {
-            // Server returns a null values if scanning is to stop.  Else,
+            // Server returns null for values if scanning is to stop. Else,
             // returns an empty array if scanning is to go on and we've just
             // exhausted current region.
             values = call(callable, caller, scannerTimeout);
+
             // When the replica switch happens, we need to do certain operations
             // again. The callable will openScanner with the right startkey
             // but we need to pick up from there. Bypass the rest of the loop
@@ -363,9 +375,13 @@ public class ClientScanner extends AbstractClientScanner {
               this.currentRegion = callable.getHRegionInfo();
               continue;
             }
-            retryAfterOutOfOrderException  = true;
+            retryAfterOutOfOrderException = true;
           } catch (DoNotRetryIOException e) {
-            // DNRIOEs are thrown to make us break out of retries.  Some types of DNRIOEs want us
+            // An exception was thrown which makes any partial results that we were collecting
+            // invalid. The scanner will need to be reset to the beginning of a row.
+            partialResults.clear();
+
+            // DNRIOEs are thrown to make us break out of retries. Some types of DNRIOEs want us
             // to reset the scanner and come back in again.
             if (e instanceof UnknownScannerException) {
               long timeout = lastNext + scannerTimeout;
@@ -374,9 +390,9 @@ public class ClientScanner extends AbstractClientScanner {
               // id against the new region server; reset the scanner.
               if (timeout < System.currentTimeMillis()) {
                 long elapsed = System.currentTimeMillis() - lastNext;
-                ScannerTimeoutException ex = new ScannerTimeoutException(
-                    elapsed + "ms passed since the last invocation, " +
-                        "timeout is currently set to " + scannerTimeout);
+                ScannerTimeoutException ex =
+                    new ScannerTimeoutException(elapsed + "ms passed since the last invocation, "
+                        + "timeout is currently set to " + scannerTimeout);
                 ex.initCause(e);
                 throw ex;
               }
@@ -385,8 +401,8 @@ public class ClientScanner extends AbstractClientScanner {
               // the scanner and retry.
               Throwable cause = e.getCause();
               if ((cause != null && cause instanceof NotServingRegionException) ||
-                (cause != null && cause instanceof RegionServerStoppedException) ||
-                e instanceof OutOfOrderScannerNextException) {
+                  (cause != null && cause instanceof RegionServerStoppedException) ||
+                  e instanceof OutOfOrderScannerNextException) {
                 // Pass
                 // It is easier writing the if loop test as list of what is allowed rather than
                 // as a list of what is not allowed... so if in here, it means we do not throw.
@@ -402,9 +418,9 @@ public class ClientScanner extends AbstractClientScanner {
               // scanner starts at the correct row. Otherwise we may see previously
               // returned rows again.
               // (ScannerCallable by now has "relocated" the correct region)
-              if(scan.isReversed()){
+              if (scan.isReversed()) {
                 scan.setStartRow(createClosestRowBefore(lastResult.getRow()));
-              }else {
+              } else {
                 scan.setStartRow(Bytes.add(lastResult.getRow(), new byte[1]));
               }
             }
@@ -414,7 +430,7 @@ public class ClientScanner extends AbstractClientScanner {
               } else {
                 // TODO: Why wrap this in a DNRIOE when it already is a DNRIOE?
                 throw new DoNotRetryIOException("Failed after retry of " +
-                  "OutOfOrderScannerNextException: was there a rpc timeout?", e);
+                    "OutOfOrderScannerNextException: was there a rpc timeout?", e);
               }
             }
             // Clear region.
@@ -422,16 +438,21 @@ public class ClientScanner extends AbstractClientScanner {
             // Set this to zero so we don't try and do an rpc and close on remote server when
             // the exception we got was UnknownScanner or the Server is going down.
             callable = null;
+
             // This continue will take us to while at end of loop where we will set up new scanner.
             continue;
           }
           long currentTime = System.currentTimeMillis();
           if (this.scanMetrics != null) {
-            this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime-lastNext);
+            this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext);
           }
           lastNext = currentTime;
-          if (values != null && values.length > 0) {
-            for (Result rs : values) {
+          // Groom the array of Results that we received back from the server before adding those
+          // Results to the scanner's cache. If partial results are not allowed to be seen by the
+          // caller, all bookkeeping will be performed within this method.
+          List<Result> resultsToAddToCache = getResultsToAddToCache(values);
+          if (!resultsToAddToCache.isEmpty()) {
+            for (Result rs : resultsToAddToCache) {
               cache.add(rs);
               // We don't make Iterator here
               for (Cell cell : rs.rawCells()) {
@@ -442,8 +463,11 @@ public class ClientScanner extends AbstractClientScanner {
             }
           }
           // Values == null means server-side filter has determined we must STOP
-        } while (remainingResultSize > 0 && countdown > 0 &&
-            possiblyNextScanner(countdown, values == null));
+          // !partialResults.isEmpty() means that we are still accumulating partial Results for a
+          // row. We should not change scanners before we receive all the partial Results for that
+          // row.
+        } while (remainingResultSize > 0 && countdown > 0
+            && (!partialResults.isEmpty() || possiblyNextScanner(countdown, values == null)));
       }
 
       if (cache.size() > 0) {
@@ -455,6 +479,145 @@ public class ClientScanner extends AbstractClientScanner {
       return null;
     }
 
+  @VisibleForTesting
+  public int getCacheSize() {
+    return cache != null ? cache.size() : 0;
+  }
+
+  /**
+   * This method ensures all of our bookkeeping regarding partial results is kept up to date. This
+   * method should be called once we know that the results we received back from the RPC request do
+   * not contain errors. We return a list of results that should be added to the cache. In general,
+   * this list will contain all NON-partial results from the input array (unless the client has
+   * specified that they are okay with receiving partial results)
+   * @return the list of results that should be added to the cache.
+   * @throws IOException
+   */
+  protected List<Result> getResultsToAddToCache(Result[] resultsFromServer) throws IOException {
+    int resultSize = resultsFromServer != null ? resultsFromServer.length : 0;
+    List<Result> resultsToAddToCache = new ArrayList<Result>(resultSize);
+
+    final boolean isBatchSet = scan != null && scan.getBatch() > 0;
+    final boolean allowPartials = scan != null && scan.getAllowPartialResults();
+
+    // If the caller has indicated in their scan that they are okay with seeing partial results,
+    // then simply add all results to the list. Note that since scan batching also returns results
+    // for a row in pieces we treat batch being set as equivalent to allowing partials. The
+    // implication of treating batching as equivalent to partial results is that it is possible
+    // the caller will receive a result back where the number of cells in the result is less than
+    // the batch size even though it may not be the last group of cells for that row.
+    if (allowPartials || isBatchSet) {
+      addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length);
+      return resultsToAddToCache;
+    }
+
+    // If no results were returned it indicates that we have all the partial results necessary
+    // to construct the complete result.
+    if (resultsFromServer == null || resultsFromServer.length == 0) {
+      if (!partialResults.isEmpty()) {
+        resultsToAddToCache.add(Result.createCompleteResult(partialResults));
+        partialResults.clear();
+      }
+
+      return resultsToAddToCache;
+    }
+
+    // In every RPC response there should be at most a single partial result. Furthermore, if
+    // there is a partial result, it is guaranteed to be in the last position of the array.
+    Result last = resultsFromServer[resultsFromServer.length - 1];
+    Result partial = last.isPartial() ? last : null;
+
+    if (LOG.isTraceEnabled()) {
+      StringBuilder sb = new StringBuilder();
+      sb.append("number results from RPC: ").append(resultsFromServer.length).append(",");
+      sb.append("partial != null: ").append(partial != null).append(",");
+      sb.append("number of partials so far: ").append(partialResults.size());
+      LOG.trace(sb.toString());
+    }
+
+    // There are four possible cases that can occur while handling partial results
+    //
+    // 1. (partial != null && partialResults.isEmpty())
+    // This is the first partial result that we have received. It should be added to
+    // the list of partialResults and await the next RPC request at which point another
+    // portion of the complete result will be received
+    //
+    // 2. (partial != null && !partialResults.isEmpty())
+    // a. values.length == 1
+    // Since partialResults contains some elements, it means that we are expecting to receive
+    // the remainder of the complete result within this RPC response. The fact that a partial result
+    // was returned and it's the ONLY result returned indicates that we are still receiving
+    // fragments of the complete result. The Result can be completely formed only when we have
+    // received all of the fragments and thus in this case we simply add the partial result to
+    // our list.
+    //
+    // b. values.length > 1
+    // More than one result has been returned from the server. The fact that we are accumulating
+    // partials in partialList and we just received more than one result back from the server
+    // indicates that the FIRST result we received from the server must be the final fragment that
+    // can be used to complete our result. What this means is that the partial that we received is
+    // a partial result for a different row, and at this point we should combine the existing
+    // partials into a complete result, clear the partialList, and begin accumulating partials for
+    // a new row
+    //
+    // 3. (partial == null && !partialResults.isEmpty())
+    // No partial was received but we are accumulating partials in our list. That means the final
+    // fragment of the complete result will be the first Result in values[]. We use it to create the
+    // complete Result, clear the list, and add it to the list of Results that must be added to the
+    // cache. All other Results in values[] are added after the complete result to maintain proper
+    // ordering
+    //
+    // 4. (partial == null && partialResults.isEmpty())
+    // Business as usual. We are not accumulating partial results and there wasn't a partial result
+    // in the RPC response. This means that all of the results we received from the server are
+    // complete and can be added directly to the cache
+    if (partial != null && partialResults.isEmpty()) {
+      partialResults.add(partial);
+
+      // Exclude the last result, it's a partial
+      addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length - 1);
+    } else if (partial != null && !partialResults.isEmpty()) {
+      if (resultsFromServer.length > 1) {
+        Result finalResult = resultsFromServer[0];
+        partialResults.add(finalResult);
+        resultsToAddToCache.add(Result.createCompleteResult(partialResults));
+        partialResults.clear();
+
+        // Exclude first result, it was used to form our complete result
+        // Exclude last result, it's a partial result
+        addResultsToList(resultsToAddToCache, resultsFromServer, 1, resultsFromServer.length - 1);
+      }
+      partialResults.add(partial);
+    } else if (partial == null && !partialResults.isEmpty()) {
+      Result finalResult = resultsFromServer[0];
+      partialResults.add(finalResult);
+      resultsToAddToCache.add(Result.createCompleteResult(partialResults));
+      partialResults.clear();
+
+      // Exclude the first result, it was used to form our complete result
+      addResultsToList(resultsToAddToCache, resultsFromServer, 1, resultsFromServer.length);
+    } else { // partial == null && partialResults.isEmpty() -- business as usual
+      addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length);
+    }
+
+    return resultsToAddToCache;
+  }
+
+  /**
+   * Helper method for adding results between the indices [start, end) to the outputList
+   * @param outputList the list that results will be added to
+   * @param inputArray the array that results are taken from
+   * @param start beginning index (inclusive)
+   * @param end ending index (exclusive)
+   */
+  private void addResultsToList(List<Result> outputList, Result[] inputArray, int start, int end) {
+    if (inputArray == null || start < 0 || end > inputArray.length) return;
+
+    for (int i = start; i < end; i++) {
+      outputList.add(inputArray[i]);
+    }
+  }
+
     @Override
     public void close() {
       if (!scanMetricsPublished) writeScanMetrics();
@@ -474,4 +637,26 @@ public class ClientScanner extends AbstractClientScanner {
       }
       closed = true;
     }
+
+  /**
+   * Create the closest row before the specified row
+   * @param row
+   * @return a new byte array which is the closest front row of the specified one
+   */
+  protected static byte[] createClosestRowBefore(byte[] row) {
+    if (row == null) {
+      throw new IllegalArgumentException("The passed row is null");
+    }
+    if (Bytes.equals(row, HConstants.EMPTY_BYTE_ARRAY)) {
+      return MAX_BYTE_ARRAY;
+    }
+    if (row[row.length - 1] == 0) {
+      return Arrays.copyOf(row, row.length - 1);
+    } else {
+      byte[] closestFrontRow = Arrays.copyOf(row, row.length);
+      closestFrontRow[row.length - 1] = (byte) ((closestFrontRow[row.length - 1] & 0xff) - 1);
+      closestFrontRow = Bytes.add(closestFrontRow, MAX_BYTE_ARRAY);
+      return closestFrontRow;
+    }
+  }
 }
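
For reference, here is a minimal standalone sketch of the createClosestRowBefore() logic that moved into ClientScanner above. The class name and main() driver are illustrative only; the nine-byte pad mirrors MAX_BYTE_ARRAY from the patch.

  import java.util.Arrays;

  public class ClosestRowBeforeDemo {
    // Mirrors ClientScanner.MAX_BYTE_ARRAY: nine 0xff bytes used to pad the
    // decremented row so the result sorts immediately before the input row.
    static final byte[] MAX_BYTE_ARRAY = new byte[9];
    static {
      Arrays.fill(MAX_BYTE_ARRAY, (byte) 0xff);
    }

    static byte[] createClosestRowBefore(byte[] row) {
      if (row == null) {
        throw new IllegalArgumentException("The passed row is null");
      }
      if (row.length == 0) {
        // Empty start row: every row sorts after it, so return the max pad.
        return MAX_BYTE_ARRAY;
      }
      if (row[row.length - 1] == 0) {
        // Trailing 0x00: dropping it yields the immediate predecessor.
        return Arrays.copyOf(row, row.length - 1);
      }
      // Otherwise decrement the last byte and append the max pad.
      byte[] closest = Arrays.copyOf(row, row.length + MAX_BYTE_ARRAY.length);
      closest[row.length - 1] = (byte) ((closest[row.length - 1] & 0xff) - 1);
      Arrays.fill(closest, row.length, closest.length, (byte) 0xff);
      return closest;
    }

    public static void main(String[] args) {
      System.out.println(Arrays.toString(createClosestRowBefore(new byte[] { 'b', 0 }))); // [98], i.e. "b"
      System.out.println(Arrays.toString(createClosestRowBefore(new byte[] { 'b' })));    // [97, -1, ... nine 0xff]
    }
  }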

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index c7a0871..66a63ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -29,12 +30,11 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -81,6 +81,17 @@ public class Result implements CellScannable, CellScanner {
   private Cell[] cells;
   private Boolean exists; // if the query was just to check existence.
   private boolean stale = false;
+
+  /**
+   * Partial results do not contain the full row's worth of cells. The result had to be returned in
+   * parts because the size of the cells in the row exceeded the RPC result size on the server.
+   * Partial results must be combined client side with results representing the remainder of the
+   * row's cells to form the complete result. Partial results and RPC result size allow us to avoid
+   * OOME on the server when servicing requests for large rows. The Scan configuration used to
+   * control the result size on the server is {@link Scan#setMaxResultSize(long)} and the default
+   * value can be seen here: {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE}
+   */
+  private boolean partial = false;
   // We're not using java serialization.  Transient here is just a marker to say
   // that this is where we cache row if we're ever asked for it.
   private transient byte [] row = null;
@@ -123,7 +134,7 @@ public class Result implements CellScannable, CellScanner {
   @Deprecated
   public Result(List<KeyValue> kvs) {
     // TODO: Here we presume the passed in Cells are KVs.  One day this won't always be so.
-    this(kvs.toArray(new Cell[kvs.size()]), null, false);
+    this(kvs.toArray(new Cell[kvs.size()]), null, false, false);
   }
 
   /**
@@ -132,7 +143,7 @@ public class Result implements CellScannable, CellScanner {
    * @param cells List of cells
    */
   public static Result create(List<Cell> cells) {
-    return new Result(cells.toArray(new Cell[cells.size()]), null, false);
+    return create(cells, null);
   }
 
   public static Result create(List<Cell> cells, Boolean exists) {
@@ -140,10 +151,14 @@ public class Result implements CellScannable, CellScanner {
   }
 
   public static Result create(List<Cell> cells, Boolean exists, boolean stale) {
+    return create(cells, exists, stale, false);
+  }
+
+  public static Result create(List<Cell> cells, Boolean exists, boolean stale, boolean partial) {
     if (exists != null){
-      return new Result(null, exists, stale);
+      return new Result(null, exists, stale, partial);
     }
-    return new Result(cells.toArray(new Cell[cells.size()]), null, stale);
+    return new Result(cells.toArray(new Cell[cells.size()]), null, stale, partial);
   }
 
   /**
@@ -152,21 +167,26 @@ public class Result implements CellScannable, CellScanner {
    * @param cells array of cells
    */
   public static Result create(Cell[] cells) {
-    return new Result(cells, null, false);
+    return create(cells, null, false);
   }
 
   public static Result create(Cell[] cells, Boolean exists, boolean stale) {
+    return create(cells, exists, stale, false);
+  }
+
+  public static Result create(Cell[] cells, Boolean exists, boolean stale, boolean partial) {
     if (exists != null){
-      return new Result(null, exists, stale);
+      return new Result(null, exists, stale, partial);
     }
-    return new Result(cells, null, stale);
+    return new Result(cells, null, stale, partial);
   }
 
   /** Private ctor. Use {@link #create(Cell[])}. */
-  private Result(Cell[] cells, Boolean exists, boolean stale) {
+  private Result(Cell[] cells, Boolean exists, boolean stale, boolean partial) {
     this.cells = cells;
     this.exists = exists;
     this.stale = stale;
+    this.partial = partial;
   }
 
   /**
@@ -823,7 +843,59 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
-   * Get total size of raw cells 
+   * Forms a single result from the partial results in the partialResults list. This method is
+   * useful for reconstructing partial results on the client side.
+   * @param partialResults list of partial results
+   * @return The complete result that is formed by combining all of the partial results together
+   * @throws IOException A complete result cannot be formed because the results in the partial list
+   *           come from different rows
+   */
+  public static Result createCompleteResult(List<Result> partialResults)
+      throws IOException {
+    List<Cell> cells = new ArrayList<Cell>();
+    boolean stale = false;
+    byte[] prevRow = null;
+    byte[] currentRow = null;
+
+    if (partialResults != null && !partialResults.isEmpty()) {
+      for (int i = 0; i < partialResults.size(); i++) {
+        Result r = partialResults.get(i);
+        currentRow = r.getRow();
+        if (prevRow != null && !Bytes.equals(prevRow, currentRow)) {
+          throw new IOException(
+              "Cannot form complete result. Rows of partial results do not match." +
+                  " Partial Results: " + partialResults);
+        }
+
+        // Ensure that all Results except the last one are marked as partials. The last result
+        // may not be marked as a partial because Results are only marked as partials when
+        // the scan on the server side must be stopped due to reaching the maxResultSize.
+        // Visualizing it makes it easier to understand:
+        // maxResultSize: 2 cells
+        // (-x-) represents cell number x in a row
+        // Example: row1: -1- -2- -3- -4- -5- (5 cells total)
+        // How row1 will be returned by the server as partial Results:
+        // Result1: -1- -2- (2 cells, size limit reached, mark as partial)
+        // Result2: -3- -4- (2 cells, size limit reached, mark as partial)
+        // Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial)
+        if (i != (partialResults.size() - 1) && !r.isPartial()) {
+          throw new IOException(
+              "Cannot form complete result. Result is missing partial flag. " +
+                  "Partial Results: " + partialResults);
+        }
+        prevRow = currentRow;
+        stale = stale || r.isStale();
+        for (Cell c : r.rawCells()) {
+          cells.add(c);
+        }
+      }
+    }
+
+    return Result.create(cells, null, stale);
+  }
+
+  /**
+   * Get total size of raw cells
    * @param result
    * @return Total size.
    */
@@ -882,6 +954,16 @@ public class Result implements CellScannable, CellScanner {
   }
 
   /**
+   * Whether or not the result is a partial result. Partial results contain a subset of the cells
+   * for a row and should be combined with a result representing the remaining cells in that row to
+   * form a complete (non-partial) result.
+   * @return Whether or not the result is a partial result
+   */
+  public boolean isPartial() {
+    return partial;
+  }
+
+  /**
    * Add load information about the region to the information about the result
    * @param loadStats statistics about the current region from which this was returned
    */
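
As a hedged illustration of the new createCompleteResult()/isPartial() pair, the sketch below rebuilds the row1 example from the javadoc above (five cells returned as two partial Results plus a final non-partial fragment). It assumes the hbase-client jar from this commit is on the classpath; the row, family, and qualifier names are made up.

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CompleteResultDemo {
    static Cell cell(String qualifier) {
      return new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
          Bytes.toBytes(qualifier), Bytes.toBytes("v"));
    }

    public static void main(String[] args) throws IOException {
      // maxResultSize of two cells: row1's five cells arrive as three Results.
      Result r1 = Result.create(Arrays.asList(cell("c1"), cell("c2")), null, false, true);
      Result r2 = Result.create(Arrays.asList(cell("c3"), cell("c4")), null, false, true);
      Result r3 = Result.create(Arrays.asList(cell("c5")), null, false, false); // last fragment
      List<Result> partials = new ArrayList<Result>(Arrays.asList(r1, r2, r3));
      Result whole = Result.createCompleteResult(partials);
      System.out.println(whole.rawCells().length); // 5; whole.isPartial() is false
    }
  }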

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 8681e19..13b164d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.commons.logging.Log;
@@ -38,9 +37,6 @@ import org.apache.hadoop.hbase.util.ExceptionUtil;
 @InterfaceAudience.Private
 public class ReversedClientScanner extends ClientScanner {
   private static final Log LOG = LogFactory.getLog(ReversedClientScanner.class);
-  // A byte array in which all elements are the max byte, and it is used to
-  // construct closest front row
-  static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);
 
   /**
    * Create a new ReversibleClientScanner for the specified table Note that the
@@ -139,9 +135,10 @@ public class ReversedClientScanner extends ClientScanner {
         new ReversedScannerCallable(getConnection(), getTable(), scan, this.scanMetrics,
             locateStartRow, this.rpcControllerFactory);
     s.setCaching(nbRows);
-    ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getTable(), getConnection(),
-        s, pool, primaryOperationTimeout, scan,
-        getRetries(), getScannerTimeout(), caching, getConf(), caller);
+    ScannerCallableWithReplicas sr =
+        new ScannerCallableWithReplicas(getTable(), getConnection(), s, pool,
+            primaryOperationTimeout, scan, getRetries(), getScannerTimeout(), caching, getConf(),
+            caller);
     return sr;
   }
 
@@ -161,26 +158,4 @@ public class ReversedClientScanner extends ClientScanner {
     }
     return false; // unlikely.
   }
-
-  /**
-   * Create the closest row before the specified row
-   * @param row
-   * @return a new byte array which is the closest front row of the specified one
-   */
-  protected static byte[] createClosestRowBefore(byte[] row) {
-    if (row == null) {
-      throw new IllegalArgumentException("The passed row is empty");
-    }
-    if (Bytes.equals(row, HConstants.EMPTY_BYTE_ARRAY)) {
-      return MAX_BYTE_ARRAY;
-    }
-    if (row[row.length - 1] == 0) {
-      return Arrays.copyOf(row, row.length - 1);
-    } else {
-      byte[] closestFrontRow = Arrays.copyOf(row, row.length);
-      closestFrontRow[row.length - 1] = (byte) ((closestFrontRow[row.length - 1] & 0xff) - 1);
-      closestFrontRow = Bytes.add(closestFrontRow, MAX_BYTE_ARRAY);
-      return closestFrontRow;
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 24af2a0..4604c32 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -98,6 +98,19 @@ public class Scan extends Query {
   private int maxVersions = 1;
   private int batch = -1;
 
+  /**
+   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
+   * {@link Result}.
+   * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
+   * cells in the row exceeded max result size on the server. Typically partial results will be
+   * combined client side into complete results before being delivered to the caller. However, if
+   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
+   * they understand that the results returned from the Scanner may only represent part of a
+   * particular row). In such a case, any attempt to combine the partials into a complete result on
+   * the client side will be skipped, and the caller will be able to see the exact results returned
+   * from the server.
+   */
+  private boolean allowPartialResults = false;
+
   private int storeLimit = -1;
   private int storeOffset = 0;
   private boolean getScan;
@@ -671,6 +684,27 @@ public class Scan extends Query {
   }
 
   /**
+   * Setting whether the caller wants to see the partial results that may be returned from the
+   * server. By default this value is false and the complete results will be assembled client side
+   * before being delivered to the caller.
+   * @param allowPartialResults
+   * @return this
+   */
+  public Scan setAllowPartialResults(final boolean allowPartialResults) {
+    this.allowPartialResults = allowPartialResults;
+    return this;
+  }
+
+  /**
+   * @return true when the caller of this scan understands that the results it sees may only
+   *         represent a partial portion of a row. The complete row would be retrieved over
+   *         subsequent calls to {@link ResultScanner#next()}
+   */
+  public boolean getAllowPartialResults() {
+    return allowPartialResults;
+  }
+
+  /**
    * Set the value indicating whether loading CFs on demand should be allowed (cluster
    * default is false). On-demand CF loading doesn't load column families until necessary, e.g.
    * if you filter on one column, the other column family data will be loaded only for the rows
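
A hedged client-side sketch of the new flag: with setAllowPartialResults(true), the scanner hands back row fragments as they arrive instead of stitching them into whole rows. The connection, table name, and size limit below are illustrative assumptions.

  // Assumes an already-open Connection `connection`; "t1" is a made-up table name.
  Scan scan = new Scan();
  scan.setMaxResultSize(2 * 1024 * 1024); // server-side chunk size per RPC
  scan.setAllowPartialResults(true);      // opt in to seeing row fragments
  Table table = connection.getTable(TableName.valueOf("t1"));
  ResultScanner scanner = table.getScanner(scan);
  try {
    for (Result result : scanner) {
      if (result.isPartial()) {
        // Only part of this row's cells; more fragments of the same row follow.
      }
    }
  } finally {
    scanner.close();
    table.close();
  }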

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
index a77b88f..38a6481 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.client.ClientScanner.createClosestRowBefore;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -44,8 +46,6 @@ import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosestRowBefore;
-
 /**
  * This class has the logic for handling scanners for regions with and without replicas.
  * 1. A scan is attempted on the default (primary) region

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index dc81a69..55311e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -36,7 +36,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableSet;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -52,6 +51,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
@@ -1284,6 +1284,7 @@ public final class ProtobufUtil {
     }
 
     builder.setStale(result.isStale());
+    builder.setPartial(result.isPartial());
 
     return builder.build();
   }
@@ -1342,7 +1343,7 @@ public final class ProtobufUtil {
     for (CellProtos.Cell c : values) {
       cells.add(toCell(c));
     }
-    return Result.create(cells, null, proto.getStale());
+    return Result.create(cells, null, proto.getStale(), proto.getPartial());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index c51925c..abd9c0f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -488,6 +488,7 @@ public final class RequestConverter {
     builder.setCloseScanner(closeScanner);
     builder.setRegion(region);
     builder.setScan(ProtobufUtil.toScan(scan));
+    builder.setClientHandlesPartials(true);
     return builder.build();
   }
 
@@ -505,6 +506,7 @@ public final class RequestConverter {
     builder.setNumberOfRows(numberOfRows);
     builder.setCloseScanner(closeScanner);
     builder.setScannerId(scannerId);
+    builder.setClientHandlesPartials(true);
     return builder.build();
   }
 
@@ -524,6 +526,7 @@ public final class RequestConverter {
     builder.setCloseScanner(closeScanner);
     builder.setScannerId(scannerId);
     builder.setNextCallSeq(nextCallSeq);
+    builder.setClientHandlesPartials(true);
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
index 9493668..984c4b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
@@ -339,6 +339,9 @@ public final class ResponseConverter {
         // Cells are out in cellblocks.  Group them up again as Results.  How many to read at a
         // time will be found in getCellsLength -- length here is how many Cells in the i'th Result
         int noOfCells = response.getCellsPerResult(i);
+        boolean isPartial =
+            response.getPartialFlagPerResultCount() > i ?
+                response.getPartialFlagPerResult(i) : false;
         List<Cell> cells = new ArrayList<Cell>(noOfCells);
         for (int j = 0; j < noOfCells; j++) {
           try {
@@ -361,7 +364,7 @@ public final class ResponseConverter {
           }
           cells.add(cellScanner.current());
         }
-        results[i] = Result.create(cells, null, response.getStale());
+        results[i] = Result.create(cells, null, response.getStale(), isPartial);
       } else {
         // Result is pure pb.
         results[i] = ProtobufUtil.toResult(response.getResults(i));
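
A short hedged sketch of the round trip wired up in ProtobufUtil and ResponseConverter above: the partial flag is written via setPartial() and read back via getPartial(). The `cells` list is assumed to be pre-built for a single row, as in the Result sketch earlier.

  Result original = Result.create(cells, null, false, true);  // partial = true
  ClientProtos.Result pb = ProtobufUtil.toResult(original);   // serializes the flag
  Result roundTripped = ProtobufUtil.toResult(pb);            // restores the flag
  System.out.println(roundTripped.isPartial());               // true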

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index c17b16b..686ff15 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -681,7 +681,7 @@ public final class HConstants {
   /**
    * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING}
    */
-  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = 100;
+  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
 
   /**
    * Parameter name for number of versions, kept by meta table.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index e606994..df1894b 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -527,13 +527,19 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.client.scanner.timeout.period</description>
+    <value>2147483647</value>
+    <description>Number of rows that we try to fetch when calling next
+    on a scanner if it is not served from (local, client) memory. This configuration
+    works together with hbase.client.scanner.max.result.size to try and use the
+    network efficiently. The default is Integer.MAX_VALUE so that
+    the network will fill the chunk size defined by hbase.client.scanner.max.result.size
+    rather than be limited by a particular number of rows since the size of rows varies
+    table to table. If you know ahead of time that you will not require more than a certain
+    number of rows from a scan, this configuration should be set to that row limit via
+    Scan#setCaching. Higher caching values will enable faster scanners but will eat up more
+    memory and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater than the scanner
+    timeout; i.e. hbase.client.scanner.timeout.period</description>
   </property>
   <property>
     <name>hbase.client.keyvalue.maxsize</name>
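
To make the new semantics concrete, here is a hedged sketch of the two ways to bound a scan described above; the row limit of 50 is an arbitrary example.

  // Cluster-wide: leave hbase.client.scanner.caching at Integer.MAX_VALUE and
  // let hbase.client.scanner.max.result.size bound the data moved per RPC.
  // Per scan, when a row limit is known ahead of time:
  Scan scan = new Scan();
  scan.setCaching(50); // fetch at most 50 rows per next() RPC rather than a full size-based chunk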

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
index 0b65341..90ef637 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
@@ -26,11 +26,11 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -39,14 +39,15 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
-import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse;
-import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService;
 import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse;
 import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder;
+import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -136,7 +137,7 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor
         List<List<Cell>> deleteRows = new ArrayList<List<Cell>>(rowBatchSize);
         for (int i = 0; i < rowBatchSize; i++) {
           List<Cell> results = new ArrayList<Cell>();
-          hasMore = scanner.next(results);
+          hasMore = NextState.hasMoreValues(scanner.next(results));
           if (results.size() > 0) {
             deleteRows.add(results);
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c64a57e/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
index 4309cdc..2afd05e 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.RpcCallback;
@@ -80,7 +81,7 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService
       byte[] lastRow = null;
       long count = 0;
       do {
-        hasMore = scanner.next(results);
+        hasMore = NextState.hasMoreValues(scanner.next(results));
         for (Cell kv : results) {
           byte[] currentRow = CellUtil.cloneRow(kv);
           if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
@@ -119,7 +120,7 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService
       boolean hasMore = false;
       long count = 0;
       do {
-        hasMore = scanner.next(results);
+        hasMore = NextState.hasMoreValues(scanner.next(results));
         for (Cell kv : results) {
           count++;
         }
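
The endpoint changes above reflect the new InternalScanner contract: next() now reports a NextState instead of a bare boolean. A hedged sketch of the resulting server-side loop idiom, with scanner acquisition elided:

  List<Cell> results = new ArrayList<Cell>();
  boolean hasMore;
  do {
    // NextState.hasMoreValues(...) recovers the old boolean meaning of next().
    hasMore = NextState.hasMoreValues(scanner.next(results));
    for (Cell cell : results) {
      // process the cell, e.g. count it or stage it for deletion
    }
    results.clear();
  } while (hasMore);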

