hbase-commits mailing list archives

From zhang...@apache.org
Subject [1/3] hbase git commit: HBASE-17508 Unify the implementation of small scan and regular scan for sync client
Date Sun, 05 Feb 2017 00:59:50 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fb1239766 -> 4456d2285
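
This change removes the separate small-scan code path in the sync client: a small scan becomes an ordinary scan with a row limit, the scanner is opened lazily, and the openScanner call itself can fetch data. Below is a minimal sketch of a one-row probe after this change, using the Scan#setOneRowLimit() helper this patch introduces (connection setup and the table name are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

    public class OneRowProbe {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("tableA"))) { // illustrative name
          Scan scan = new Scan();
          scan.setFilter(new FirstKeyOnlyFilter()); // return only the first cell per row
          scan.setMaxResultSize(1L);
          scan.setOneRowLimit(); // replaces the old scan.setSmall(true)
          try (ResultScanner scanner = table.getScanner(scan)) {
            // the scanner is opened lazily; this first next() opens it and
            // returns the row fetched by the openScanner call itself
            Result r = scanner.next();
            System.out.println(r == null ? "no rows" : "got a row");
          }
        }
      }
    }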


http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index b19b482..27518ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -63,27 +63,27 @@ public class SyncTable extends Configured implements Tool {
   static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
   static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
   static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
-  
+
   Path sourceHashDir;
   String sourceTableName;
   String targetTableName;
-  
+
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
-  
+
   Counters counters;
-  
+
   public SyncTable(Configuration conf) {
     super(conf);
   }
-  
+
   public Job createSubmittableJob(String[] args) throws IOException {
     FileSystem fs = sourceHashDir.getFileSystem(getConf());
     if (!fs.exists(sourceHashDir)) {
       throw new IOException("Source hash dir not found: " + sourceHashDir);
     }
-    
+
     HashTable.TableHash tableHash = HashTable.TableHash.read(getConf(), sourceHashDir);
     LOG.info("Read source hash manifest: " + tableHash);
     LOG.info("Read " + tableHash.partitions.size() + " partition keys");
@@ -97,7 +97,7 @@ public class SyncTable extends Configured implements Tool {
           + " says numHashFiles=" + tableHash.numHashFiles + " but the number of partition
keys"
           + " found in the partitions file is " + tableHash.partitions.size());
     }
-    
+
     Path dataDir = new Path(sourceHashDir, HashTable.HASH_DATA_DIR);
     int dataSubdirCount = 0;
     for (FileStatus file : fs.listStatus(dataDir)) {
@@ -105,14 +105,14 @@ public class SyncTable extends Configured implements Tool {
         dataSubdirCount++;
       }
     }
-    
+
     if (dataSubdirCount != tableHash.numHashFiles) {
       throw new RuntimeException("Hash data appears corrupt. The number of of hash files
created"
           + " should be 1 more than the number of partition keys.  However, the number of
data dirs"
           + " found is " + dataSubdirCount + " but the number of partition keys"
           + " found in the partitions file is " + tableHash.partitions.size());
     }
-    
+
     Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name",
         "syncTable_" + sourceTableName + "-" + targetTableName));
     Configuration jobConf = job.getConfiguration();
@@ -127,12 +127,12 @@ public class SyncTable extends Configured implements Tool {
       jobConf.set(TARGET_ZK_CLUSTER_CONF_KEY, targetZkCluster);
     }
     jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
-    
+
     TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
         SyncMapper.class, null, null, job);
-    
+
     job.setNumReduceTasks(0);
-     
+
     if (dryRun) {
       job.setOutputFormatClass(NullOutputFormat.class);
     } else {
@@ -140,37 +140,37 @@ public class SyncTable extends Configured implements Tool {
       // because it sets up the TableOutputFormat.
       TableMapReduceUtil.initTableReducerJob(targetTableName, null, job, null,
           targetZkCluster, null, null);
-      
+
       // would be nice to add an option for bulk load instead
     }
-    
+
     return job;
   }
-  
+
   public static class SyncMapper extends TableMapper<ImmutableBytesWritable, Mutation> {
     Path sourceHashDir;
-    
+
     Connection sourceConnection;
     Connection targetConnection;
     Table sourceTable;
     Table targetTable;
     boolean dryRun;
-    
+
     HashTable.TableHash sourceTableHash;
     HashTable.TableHash.Reader sourceHashReader;
     ImmutableBytesWritable currentSourceHash;
     ImmutableBytesWritable nextSourceKey;
     HashTable.ResultHasher targetHasher;
-    
+
     Throwable mapperException;
-     
+
     public static enum Counter {BATCHES, HASHES_MATCHED, HASHES_NOT_MATCHED, SOURCEMISSINGROWS,
       SOURCEMISSINGCELLS, TARGETMISSINGROWS, TARGETMISSINGCELLS, ROWSWITHDIFFS, DIFFERENTCELLVALUES,
       MATCHINGROWS, MATCHINGCELLS, EMPTY_BATCHES, RANGESMATCHED, RANGESNOTMATCHED};
-    
+
     @Override
     protected void setup(Context context) throws IOException {
-      
+
       Configuration conf = context.getConfiguration();
       sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY));
       sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null);
@@ -179,23 +179,23 @@ public class SyncTable extends Configured implements Tool {
       sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
       targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
       dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
-      
+
       sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
       LOG.info("Read source hash manifest: " + sourceTableHash);
       LOG.info("Read " + sourceTableHash.partitions.size() + " partition keys");
-      
+
       TableSplit split = (TableSplit) context.getInputSplit();
       ImmutableBytesWritable splitStartKey = new ImmutableBytesWritable(split.getStartRow());
-      
+
       sourceHashReader = sourceTableHash.newReader(conf, splitStartKey);
       findNextKeyHashPair();
-      
+
       // create a hasher, but don't start it right away
       // instead, find the first hash batch at or after the start row
       // and skip any rows that come before.  they will be caught by the previous task
       targetHasher = new HashTable.ResultHasher();
     }
-  
+
     private static Connection openConnection(Configuration conf, String zkClusterConfKey,
                                              String configPrefix)
       throws IOException {
@@ -204,12 +204,12 @@ public class SyncTable extends Configured implements Tool {
             zkCluster, configPrefix);
         return ConnectionFactory.createConnection(clusterConf);
     }
-    
+
     private static Table openTable(Connection connection, Configuration conf,
         String tableNameConfKey) throws IOException {
       return connection.getTable(TableName.valueOf(conf.get(tableNameConfKey)));
     }
-    
+
     /**
      * Attempt to read the next source key/hash pair.
      * If there are no more, set nextSourceKey to null
@@ -223,7 +223,7 @@ public class SyncTable extends Configured implements Tool {
         nextSourceKey = null;
       }
     }
-    
+
     @Override
     protected void map(ImmutableBytesWritable key, Result value, Context context)
         throws IOException, InterruptedException {
@@ -232,7 +232,7 @@ public class SyncTable extends Configured implements Tool {
         while (nextSourceKey != null && key.compareTo(nextSourceKey) >= 0) {
           moveToNextBatch(context);
         }
-        
+
         // next, add the scanned row (as long as we've reached the first batch)
         if (targetHasher.isBatchStarted()) {
           targetHasher.hashResult(value);
@@ -247,7 +247,7 @@ public class SyncTable extends Configured implements Tool {
 
     /**
      * If there is an open hash batch, complete it and sync if there are diffs.
-     * Start a new batch, and seek to read the 
+     * Start a new batch, and seek to read the
      */
     private void moveToNextBatch(Context context) throws IOException, InterruptedException {
       if (targetHasher.isBatchStarted()) {
@@ -255,7 +255,7 @@ public class SyncTable extends Configured implements Tool {
       }
       targetHasher.startBatch(nextSourceKey);
       currentSourceHash = sourceHashReader.getCurrentHash();
-      
+
       findNextKeyHashPair();
     }
 
@@ -276,28 +276,28 @@ public class SyncTable extends Configured implements Tool {
         context.getCounter(Counter.HASHES_MATCHED).increment(1);
       } else {
         context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1);
-        
+
         ImmutableBytesWritable stopRow = nextSourceKey == null
                                           ? new ImmutableBytesWritable(sourceTableHash.stopRow)
                                           : nextSourceKey;
-        
+
         if (LOG.isDebugEnabled()) {
           LOG.debug("Hash mismatch.  Key range: " + toHex(targetHasher.getBatchStartKey())
               + " to " + toHex(stopRow)
               + " sourceHash: " + toHex(currentSourceHash)
               + " targetHash: " + toHex(targetHash));
         }
-        
+
         syncRange(context, targetHasher.getBatchStartKey(), stopRow);
       }
     }
     private static String toHex(ImmutableBytesWritable bytes) {
       return Bytes.toHex(bytes.get(), bytes.getOffset(), bytes.getLength());
     }
-    
+
     private static final CellScanner EMPTY_CELL_SCANNER
       = new CellScanner(Iterators.<Result>emptyIterator());
-    
+
     /**
      * Rescan the given range directly from the source and target tables.
      * Count and log differences, and if this is not a dry run, output Puts and Deletes
@@ -305,17 +305,16 @@ public class SyncTable extends Configured implements Tool {
      */
     private void syncRange(Context context, ImmutableBytesWritable startRow,
         ImmutableBytesWritable stopRow) throws IOException, InterruptedException {
-      
       Scan scan = sourceTableHash.initScan();
       scan.setStartRow(startRow.copyBytes());
       scan.setStopRow(stopRow.copyBytes());
-      
+
       ResultScanner sourceScanner = sourceTable.getScanner(scan);
       CellScanner sourceCells = new CellScanner(sourceScanner.iterator());
 
-      ResultScanner targetScanner = targetTable.getScanner(scan);
+      ResultScanner targetScanner = targetTable.getScanner(new Scan(scan));
       CellScanner targetCells = new CellScanner(targetScanner.iterator());
-      
+
       boolean rangeMatched = true;
       byte[] nextSourceRow = sourceCells.nextRow();
       byte[] nextTargetRow = targetCells.nextRow();
@@ -327,7 +326,7 @@ public class SyncTable extends Configured implements Tool {
             LOG.info("Target missing row: " + Bytes.toHex(nextSourceRow));
           }
           context.getCounter(Counter.TARGETMISSINGROWS).increment(1);
-          
+
           rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER);
           nextSourceRow = sourceCells.nextRow();  // advance only source to next row
         } else if (rowComparison > 0) {
@@ -335,41 +334,41 @@ public class SyncTable extends Configured implements Tool {
             LOG.info("Source missing row: " + Bytes.toHex(nextTargetRow));
           }
           context.getCounter(Counter.SOURCEMISSINGROWS).increment(1);
-          
+
           rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells);
           nextTargetRow = targetCells.nextRow();  // advance only target to next row
         } else {
           // current row is the same on both sides, compare cell by cell
           rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells);
-          nextSourceRow = sourceCells.nextRow();  
+          nextSourceRow = sourceCells.nextRow();
           nextTargetRow = targetCells.nextRow();
         }
-        
+
         if (!rowMatched) {
           rangeMatched = false;
         }
       }
-      
+
       sourceScanner.close();
       targetScanner.close();
-      
+
       context.getCounter(rangeMatched ? Counter.RANGESMATCHED : Counter.RANGESNOTMATCHED)
         .increment(1);
     }
-    
+
     private static class CellScanner {
       private final Iterator<Result> results;
-      
+
       private byte[] currentRow;
       private Result currentRowResult;
       private int nextCellInRow;
-      
+
       private Result nextRowResult;
-      
+
       public CellScanner(Iterator<Result> results) {
         this.results = results;
       }
-      
+
       /**
        * Advance to the next row and return its row key.
        * Returns null iff there are no more rows.
@@ -390,7 +389,7 @@ public class SyncTable extends Configured implements Tool {
               nextRowResult = null;
             }
           }
-          
+
           if (nextRowResult == null) {
             // end of data, no more rows
             currentRowResult = null;
@@ -398,7 +397,7 @@ public class SyncTable extends Configured implements Tool {
             return null;
           }
         }
-        
+
         // advance to cached result for next row
         currentRowResult = nextRowResult;
         nextCellInRow = 0;
@@ -406,7 +405,7 @@ public class SyncTable extends Configured implements Tool {
         nextRowResult = null;
         return currentRow;
       }
-      
+
       /**
        * Returns the next Cell in the current row or null iff none remain.
        */
@@ -415,7 +414,7 @@ public class SyncTable extends Configured implements Tool {
           // nothing left in current row
           return null;
         }
-        
+
         Cell nextCell = currentRowResult.rawCells()[nextCellInRow];
         nextCellInRow++;
         if (nextCellInRow == currentRowResult.size()) {
@@ -441,7 +440,7 @@ public class SyncTable extends Configured implements Tool {
         return nextCell;
       }
     }
-       
+
     /**
      * Compare the cells for the given row from the source and target tables.
      * Count and log any differences.
@@ -465,14 +464,14 @@ public class SyncTable extends Configured implements Tool {
           }
           context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
           matchingRow = false;
-          
+
           if (!dryRun) {
             if (put == null) {
               put = new Put(rowKey);
             }
             put.add(sourceCell);
           }
-          
+
           sourceCell = sourceCells.nextCellInRow();
         } else if (cellKeyComparison > 0) {
           if (LOG.isDebugEnabled()) {
@@ -480,7 +479,7 @@ public class SyncTable extends Configured implements Tool {
           }
           context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
           matchingRow = false;
-          
+
           if (!dryRun) {
             if (delete == null) {
               delete = new Delete(rowKey);
@@ -489,7 +488,7 @@ public class SyncTable extends Configured implements Tool {
             delete.addColumn(CellUtil.cloneFamily(targetCell),
                 CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp());
           }
-          
+
           targetCell = targetCells.nextCellInRow();
         } else {
           // the cell keys are equal, now check values
@@ -507,7 +506,7 @@ public class SyncTable extends Configured implements Tool {
             }
             context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
             matchingRow = false;
-            
+
             if (!dryRun) {
               // overwrite target cell
               if (put == null) {
@@ -519,7 +518,7 @@ public class SyncTable extends Configured implements Tool {
           sourceCell = sourceCells.nextCellInRow();
           targetCell = targetCells.nextCellInRow();
         }
-        
+
         if (!dryRun && sourceTableHash.scanBatch > 0) {
           if (put != null && put.size() >= sourceTableHash.scanBatch) {
             context.write(new ImmutableBytesWritable(rowKey), put);
@@ -531,7 +530,7 @@ public class SyncTable extends Configured implements Tool {
           }
         }
       }
-      
+
       if (!dryRun) {
         if (put != null) {
           context.write(new ImmutableBytesWritable(rowKey), put);
@@ -540,7 +539,7 @@ public class SyncTable extends Configured implements Tool {
           context.write(new ImmutableBytesWritable(rowKey), delete);
         }
       }
-      
+
       if (matchingCells > 0) {
         context.getCounter(Counter.MATCHINGCELLS).increment(matchingCells);
       }
@@ -580,21 +579,21 @@ public class SyncTable extends Configured implements Tool {
       if (c2 == null) {
         return -1; // target missing cell
       }
-      
+
       int result = CellComparator.compareFamilies(c1, c2);
       if (result != 0) {
         return result;
       }
-      
+
       result = CellComparator.compareQualifiers(c1, c2);
       if (result != 0) {
         return result;
       }
-      
+
       // note timestamp comparison is inverted - more recent cells first
       return CellComparator.compareTimestamps(c1, c2);
     }
-     
+
     @Override
     protected void cleanup(Context context)
         throws IOException, InterruptedException {
@@ -605,7 +604,7 @@ public class SyncTable extends Configured implements Tool {
           mapperException = t;
         }
       }
-      
+
       try {
         sourceTable.close();
         targetTable.close();
@@ -618,7 +617,7 @@ public class SyncTable extends Configured implements Tool {
           LOG.error("Suppressing exception from closing tables", t);
         }
       }
-      
+
       // propagate first exception
       if (mapperException != null) {
         Throwables.propagateIfInstanceOf(mapperException, IOException.class);
@@ -638,7 +637,7 @@ public class SyncTable extends Configured implements Tool {
           && (nextSourceKey.compareTo(splitEndRow) < 0 || reachedEndOfTable))
{
         moveToNextBatch(context);
       }
-      
+
       if (targetHasher.isBatchStarted()) {
         // need to complete the final open hash batch
 
@@ -653,7 +652,7 @@ public class SyncTable extends Configured implements Tool {
           } else {
             scan.setStopRow(nextSourceKey.copyBytes());
           }
-          
+
           ResultScanner targetScanner = null;
           try {
             targetScanner = targetTable.getScanner(scan);
@@ -671,7 +670,7 @@ public class SyncTable extends Configured implements Tool {
       }
     }
   }
-  
+
   private static final int NUM_ARGS = 3;
   private static void printUsage(final String errorMsg) {
     if (errorMsg != null && errorMsg.length() > 0) {
@@ -681,7 +680,7 @@ public class SyncTable extends Configured implements Tool {
     System.err.println("Usage: SyncTable [options] <sourcehashdir> <sourcetable>
<targettable>");
     System.err.println();
     System.err.println("Options:");
-    
+
     System.err.println(" sourcezkcluster  ZK cluster key of the source table");
     System.err.println("                  (defaults to cluster in classpath's config)");
     System.err.println(" targetzkcluster  ZK cluster key of the target table");
@@ -703,7 +702,7 @@ public class SyncTable extends Configured implements Tool {
         + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase"
         + " hdfs://nn:9000/hashes/tableA tableA tableA");
   }
-  
+
   private boolean doCommandLine(final String[] args) {
     if (args.length < NUM_ARGS) {
       printUsage(null);
@@ -713,37 +712,37 @@ public class SyncTable extends Configured implements Tool {
       sourceHashDir = new Path(args[args.length - 3]);
       sourceTableName = args[args.length - 2];
       targetTableName = args[args.length - 1];
-            
+
       for (int i = 0; i < args.length - NUM_ARGS; i++) {
         String cmd = args[i];
         if (cmd.equals("-h") || cmd.startsWith("--h")) {
           printUsage(null);
           return false;
         }
-        
+
         final String sourceZkClusterKey = "--sourcezkcluster=";
         if (cmd.startsWith(sourceZkClusterKey)) {
           sourceZkCluster = cmd.substring(sourceZkClusterKey.length());
           continue;
         }
-        
+
         final String targetZkClusterKey = "--targetzkcluster=";
         if (cmd.startsWith(targetZkClusterKey)) {
           targetZkCluster = cmd.substring(targetZkClusterKey.length());
           continue;
         }
-        
+
         final String dryRunKey = "--dryrun=";
         if (cmd.startsWith(dryRunKey)) {
           dryRun = Boolean.parseBoolean(cmd.substring(dryRunKey.length()));
           continue;
         }
-        
+
         printUsage("Invalid argument '" + cmd + "'");
         return false;
       }
 
-      
+
     } catch (Exception e) {
       e.printStackTrace();
       printUsage("Can't start because " + e.getMessage());
@@ -751,7 +750,7 @@ public class SyncTable extends Configured implements Tool {
     }
     return true;
   }
-  
+
   /**
    * Main entry point.
    */

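One behavioral fix in the SyncTable diff above deserves a note: the target scanner is now created from new Scan(scan) instead of reusing the Scan that already backs the source scanner. Presumably the unified client scanner records per-scanner state (such as scan progress or the mvcc read point) on the Scan it is handed, so sharing one instance between two scanners is no longer safe. A hedged sketch of the pattern, with illustrative method and parameter names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    // Scan the same key range on two tables, as SyncTable#syncRange does.
    static void scanBothTables(Table source, Table target, byte[] start, byte[] stop)
        throws IOException {
      Scan scan = new Scan().setStartRow(start).setStopRow(stop);
      ResultScanner sourceScanner = source.getScanner(scan);
      // Copy-construct the Scan for the second scanner rather than sharing one instance.
      ResultScanner targetScanner = target.getScanner(new Scan(scan));
      try {
        // ... walk both scanners and compare results cell by cell ...
      } finally {
        sourceScanner.close();
        targetScanner.close();
      }
    }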
http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index c5a850b..7339d87 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -274,11 +274,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     private final String scannerName;
     private final RegionScanner s;
     private final Region r;
+    private final boolean allowPartial;
 
-    public RegionScannerHolder(String scannerName, RegionScanner s, Region r) {
+    public RegionScannerHolder(String scannerName, RegionScanner s, Region r, boolean allowPartial) {
       this.scannerName = scannerName;
       this.s = s;
       this.r = r;
+      this.allowPartial = allowPartial;
     }
 
     public long getNextCallSeq() {
@@ -1110,11 +1112,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return lastBlock;
   }
 
-  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r)
-      throws LeaseStillHeldException {
+  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r,
+      boolean allowPartial) throws LeaseStillHeldException {
     regionServer.leases.createLease(scannerName, this.scannerLeaseTimeoutPeriod,
       new ScannerListener(scannerName));
-    RegionScannerHolder rsh = new RegionScannerHolder(scannerName, s, r);
+    RegionScannerHolder rsh = new RegionScannerHolder(scannerName, s, r, allowPartial);
     RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh);
     assert existing == null : "scannerId must be unique within regionserver's whole lifecycle!";
     return rsh;
@@ -2460,8 +2462,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return rsh;
   }
 
-  private Pair<RegionScannerHolder, Boolean> newRegionScanner(ScanRequest request,
-      ScanResponse.Builder builder) throws IOException {
+  private RegionScannerHolder newRegionScanner(ScanRequest request, ScanResponse.Builder builder)
+      throws IOException {
     Region region = getRegion(request.getRegion());
     ClientProtos.Scan protoScan = request.getScan();
     boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
@@ -2491,7 +2493,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     builder.setMvccReadPoint(scanner.getMvccReadPoint());
     builder.setTtl(scannerLeaseTimeoutPeriod);
     String scannerName = String.valueOf(scannerId);
-    return Pair.newPair(addScanner(scannerName, scanner, region), scan.isSmall());
+    return addScanner(scannerName, scanner, region,
+      !scan.isSmall() && !(request.hasLimitOfRows() && request.getLimitOfRows() > 0));
   }
 
   private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh)
@@ -2548,9 +2551,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
   // return whether we have more results in region.
   private boolean scan(PayloadCarryingRpcController controller, ScanRequest request,
-      RegionScannerHolder rsh, boolean isSmallScan, long maxQuotaResultSize, int rows,
-      List<Result> results, ScanResponse.Builder builder, MutableObject lastBlock,
-      RpcCallContext context) throws IOException {
+      RegionScannerHolder rsh, long maxQuotaResultSize, int rows, List<Result> results,
+      ScanResponse.Builder builder, MutableObject lastBlock, RpcCallContext context)
+      throws IOException {
     Region region = rsh.r;
     RegionScanner scanner = rsh.s;
     long maxResultSize;
@@ -2581,7 +2584,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         // formed.
         boolean serverGuaranteesOrderOfPartials = results.isEmpty();
         boolean allowPartialResults =
-            clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan;
+            clientHandlesPartials && serverGuaranteesOrderOfPartials && rsh.allowPartial;
         boolean moreRows = false;
 
        // Heartbeat messages occur when the processing of the ScanRequest exceeds a
@@ -2738,15 +2741,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     rpcScanRequestCount.increment();
     RegionScannerHolder rsh;
     ScanResponse.Builder builder = ScanResponse.newBuilder();
-    boolean isSmallScan;
     try {
       if (request.hasScannerId()) {
         rsh = getRegionScanner(request);
-        isSmallScan = false;
       } else {
-        Pair<RegionScannerHolder, Boolean> pair = newRegionScanner(request, builder);
-        rsh = pair.getFirst();
-        isSmallScan = pair.getSecond().booleanValue();
+        rsh = newRegionScanner(request, builder);
       }
     } catch (IOException e) {
       if (e == SCANNER_ALREADY_CLOSED) {
@@ -2805,6 +2804,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     RegionScanner scanner = rsh.s;
     boolean moreResults = true;
     boolean moreResultsInRegion = true;
+    // this is the limit of rows for this scan, if the number of rows reaches this value, we
+    // will close the scanner.
+    int limitOfRows;
+    if (request.hasLimitOfRows()) {
+      limitOfRows = request.getLimitOfRows();
+      rows = Math.min(rows, limitOfRows);
+    } else {
+      limitOfRows = -1;
+    }
     MutableObject lastBlock = new MutableObject();
     boolean scannerClosed = false;
     try {
@@ -2825,7 +2833,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         }
         if (!done) {
           moreResultsInRegion = scan((PayloadCarryingRpcController) controller, request,
rsh,
-            isSmallScan, maxQuotaResultSize, rows, results, builder, lastBlock, context);
+            maxQuotaResultSize, rows, results, builder, lastBlock, context);
         }
       }
 
@@ -2837,6 +2845,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
        // with the old scan implementation where we just ignore the returned results if moreResults
         // is false. Can remove the isEmpty check after we get rid of the old implementation.
         moreResults = false;
+      } else if (limitOfRows > 0 && results.size() >= limitOfRows &&
+          !results.get(results.size() - 1).isPartial()) {
+        // if we have reached the limit of rows
+        moreResults = false;
       }
       addResults(builder, results, (PayloadCarryingRpcController) controller,
         RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()));

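The server-side hunks above thread a row limit through the scan RPC: newRegionScanner turns off partial results whenever the request carries a positive limitOfRows, the per-call row count is capped at Math.min(rows, limitOfRows), and moreResults is set to false once the limit is reached with a complete (non-partial) last row. A sketch of the client call that drives this path, assuming the Scan#setLimit(int) API that populates request.getLimitOfRows() (that API is not shown in these hunks; the table name is illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class LimitedScan {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t"))) { // illustrative name
          Scan scan = new Scan().setLimit(10); // assumed client API for limitOfRows
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
              // at most 10 complete rows come back; the server reports
              // moreResults=false and closes the scanner once the limit is hit
            }
          }
        }
      }
    }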
http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 4fa9875..419b7c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -365,7 +366,7 @@ public final class Canary implements Tool {
           scan.setFilter(new FirstKeyOnlyFilter());
           scan.addFamily(column.getName());
           scan.setMaxResultSize(1L);
-          scan.setSmall(true);
+          scan.setOneRowLimit();
         }
 
         if (LOG.isDebugEnabled()) {
@@ -500,7 +501,7 @@ public final class Canary implements Tool {
           scan.setFilter(new FirstKeyOnlyFilter());
           scan.setCaching(1);
           scan.setMaxResultSize(1L);
-          scan.setSmall(true);
+          scan.setOneRowLimit();
           stopWatch.start();
           ResultScanner s = table.getScanner(scan);
           s.next();

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index f3becfe..6b78598 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2427,14 +2427,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    * Return the number of rows in the given table.
    */
   public int countRows(final Table table) throws IOException {
-    Scan scan = new Scan();
-    ResultScanner results = table.getScanner(scan);
-    int count = 0;
-    for (@SuppressWarnings("unused") Result res : results) {
-      count++;
+    return countRows(table, new Scan());
+  }
+
+  public int countRows(final Table table, final Scan scan) throws IOException {
+    try (ResultScanner results = table.getScanner(scan)) {
+      int count = 0;
+      while (results.next() != null) {
+        count++;
+      }
+      return count;
     }
-    results.close();
-    return count;
   }
 
   public int countRows(final Table table, final byte[]... families) throws IOException {
@@ -2442,13 +2445,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     for (byte[] family: families) {
       scan.addFamily(family);
     }
-    ResultScanner results = table.getScanner(scan);
-    int count = 0;
-    for (@SuppressWarnings("unused") Result res : results) {
-      count++;
-    }
-    results.close();
-    return count;
+    return countRows(table, scan);
   }
 
   /**

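The countRows refactor above adds a Scan-accepting overload and routes the per-family variant through it, with try-with-resources closing the scanner even if next() throws. A small usage sketch of the new overload (the helper name and family bytes are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    // Count only rows that have cells in the given family; any Scan shape works now.
    static int countFamilyRows(HBaseTestingUtility util, Table table, byte[] family)
        throws IOException {
      Scan scan = new Scan();
      scan.addFamily(family);
      return util.countRows(table, scan);
    }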
http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
index 3bf91a4..df38b8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
@@ -166,7 +166,7 @@ public class TestMetaTableAccessorNoCluster {
             public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
               ((PayloadCarryingRpcController) invocation.getArguments()[0]).setCellScanner(CellUtil
                   .createCellScanner(cellScannables));
-              return builder.build();
+              return builder.setScannerId(1234567890L).build();
             }
           }).thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
       // Associate a spied-upon HConnection with UTIL.getConfiguration.  Need

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index ed8e848..181d55a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -607,7 +607,6 @@ public class TestPartialResultsFromClientSide {
     scan.setAllowPartialResults(true);
     scan.setSmall(true);
     scan.setMaxResultSize(1);
-
     ResultScanner scanner = TABLE.getScanner(scan);
     Result r = null;
 
@@ -731,11 +730,13 @@ public class TestPartialResultsFromClientSide {
     byte[] value = Bytes.createMaxByteArray(100);
 
     Table tmpTable = createTestTable(testName, rows, families, qualifiers, value);
-
     // Open scanner before deletes
     ResultScanner scanner =
         tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
-
+    // now openScanner will also fetch data and is executed lazily, i.e., the scanner is only
+    // opened when you call next, so here we need to make a next call to open the scanner. The
+    // maxResultSize limit makes sure that we will not fetch all the data at once, so the test
+    // still works.
+    int scannerCount = scanner.next().rawCells().length;
     Delete delete1 = new Delete(rows[0]);
     delete1.addColumn(families[0], qualifiers[0], 0);
     tmpTable.delete(delete1);
@@ -745,7 +746,7 @@ public class TestPartialResultsFromClientSide {
     tmpTable.delete(delete2);
 
     // Should see all cells because scanner was opened prior to deletes
-    int scannerCount = countCellsFromScanner(scanner);
+    scannerCount += countCellsFromScanner(scanner);
     int expectedCount = numRows * numFamilies * numQualifiers;
     assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
         scannerCount == expectedCount);
@@ -758,6 +759,7 @@ public class TestPartialResultsFromClientSide {
         scannerCount == expectedCount);
 
     scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
+    scannerCount = scanner.next().rawCells().length;
     // Put in 2 new rows. The timestamps differ from the deleted rows
     Put put1 = new Put(rows[0]);
     put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value));
@@ -768,7 +770,7 @@ public class TestPartialResultsFromClientSide {
     tmpTable.put(put2);
 
     // Scanner opened prior to puts. Cell count shouldn't have changed
-    scannerCount = countCellsFromScanner(scanner);
+    scannerCount += countCellsFromScanner(scanner);
     expectedCount = numRows * numFamilies * numQualifiers - 2;
     assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
         scannerCount == expectedCount);

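The test edits above all compensate for the same behavioral shift: getScanner() no longer performs an RPC, so the first next() both opens the scanner and returns whatever the open fetched. A test that wants to pin scanner state before a later mutation must therefore call next() once right after creating the scanner, as sketched here (helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    // Open a scanner and force the (now lazy) openScanner call; the small
    // maxResultSize keeps the open from fetching everything at once.
    static int openAndCountFirstBatch(Table table) throws IOException {
      ResultScanner scanner =
          table.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true));
      Result first = scanner.next(); // opens the scanner and fetches the first cells
      return first == null ? 0 : first.rawCells().length;
    }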
http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
index 4a3b152..803a81d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
@@ -85,9 +85,11 @@ public class TestClientScannerRPCTimeout {
   public void testScannerNextRPCTimesout() throws Exception {
     final TableName TABLE_NAME = TableName.valueOf("testScannerNextRPCTimesout");
     Table ht = TEST_UTIL.createTable(TABLE_NAME, FAMILY);
+    byte[] r0 = Bytes.toBytes("row-0");
     byte[] r1 = Bytes.toBytes("row-1");
     byte[] r2 = Bytes.toBytes("row-2");
     byte[] r3 = Bytes.toBytes("row-3");
+    putToTable(ht, r0);
     putToTable(ht, r1);
     putToTable(ht, r2);
     putToTable(ht, r3);
@@ -97,6 +99,9 @@ public class TestClientScannerRPCTimeout {
     scan.setCaching(1);
     ResultScanner scanner = ht.getScanner(scan);
     Result result = scanner.next();
+    // row-0 was already fetched during openScanner
+    assertTrue("Expected row: row-0", Bytes.equals(r0, result.getRow()));
+    result = scanner.next();
     assertTrue("Expected row: row-1", Bytes.equals(r1, result.getRow()));
     LOG.info("Got expected first row");
     long t1 = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 69fdf6a..713688c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -6108,7 +6108,7 @@ public class TestFromClientSide {
   public void testReversedScanUnderMultiRegions() throws Exception {
     // Test Initialization.
     TableName TABLE = TableName.valueOf("testReversedScanUnderMultiRegions");
-    byte[] maxByteArray = ReversedClientScanner.MAX_BYTE_ARRAY;
+    byte[] maxByteArray = ConnectionUtils.MAX_BYTE_ARRAY;
     byte[][] splitRows = new byte[][] { Bytes.toBytes("005"),
         Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)),
         Bytes.toBytes("006"),

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
index 3333f54..288872e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java
@@ -106,8 +106,8 @@ public class TestLeaseRenewal {
     Scan s = new Scan();
     s.setCaching(1);
     ResultScanner rs = table.getScanner(s);
-    // make sure that calling renewLease does not impact the scan results
-    assertTrue(((AbstractClientScanner)rs).renewLease());
+    // we haven't opened the scanner yet, so there is no lease to renew
+    assertFalse(((AbstractClientScanner) rs).renewLease());
     assertTrue(Arrays.equals(rs.next().getRow(), ANOTHERROW));
     // renew the lease a few times, long enough to be sure
     // the lease would have expired otherwise

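The flipped assertion above follows from the same laziness: until the first next() there is no scanner, and hence no lease, on the region server, so renewLease() now returns false. A hedged sketch of the new semantics, assuming AbstractClientScanner#renewLease() behaves as the updated test expects:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.AbstractClientScanner;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    static void demoRenewLease(Table table) throws IOException {
      try (ResultScanner rs = table.getScanner(new Scan().setCaching(1))) {
        // no server-side scanner exists yet, so there is no lease to renew
        boolean before = ((AbstractClientScanner) rs).renewLease(); // false
        rs.next(); // the first next() opens the scanner and creates the lease
        boolean after = ((AbstractClientScanner) rs).renewLease(); // true while open
      }
    }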
http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index e2ca677..261915e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -169,7 +169,7 @@ public class TestRegionServerMetrics {
 
     // By default, master doesn't host meta now.
     // Adding some meta related requests
-    requests += 3;
+    requests += 1;
     readRequests ++;
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index 1d7ec46..6febd45 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -46,6 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -199,6 +198,7 @@ public class TestScannerWithBulkload {
     HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
     createTable(admin, tableName);
     Scan scan = createScan();
+    scan.setCaching(1);
     final HTable table = init(admin, l, scan, tableName);
     // use bulkload
     final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadWithParallelScan/",
@@ -207,6 +207,7 @@ public class TestScannerWithBulkload {
     conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
     final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
     ResultScanner scanner = table.getScanner(scan);
+    Result result = scanner.next();
     // Create a scanner and then do bulk load
     final CountDownLatch latch = new CountDownLatch(1);
     new Thread() {
@@ -226,7 +227,6 @@ public class TestScannerWithBulkload {
     latch.await();
     // By the time we do next() the bulk loaded files are also added to the kv
     // scanner
-    Result result = scanner.next();
     scanAfterBulkLoad(scanner, result, "version1");
     scanner.close();
     table.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 79d65cd..1c1ab39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -947,7 +947,6 @@ public class TestAccessController extends SecureTestUtil {
             for (Result r = scanner.next(); r != null; r = scanner.next()) {
               // do nothing
             }
-          } catch (IOException e) {
           } finally {
             scanner.close();
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4456d228/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index d400fa6..ec8bc95 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -381,6 +381,7 @@ public class TestAccessController2 extends SecureTestUtil {
           Scan s1 = new Scan();
           s1.addFamily(TEST_FAMILY_2);
           try (ResultScanner scanner1 = table.getScanner(s1);) {
+            scanner1.next();
           }
         }
         return null;
@@ -411,6 +412,7 @@ public class TestAccessController2 extends SecureTestUtil {
           Scan s1 = new Scan();
           s1.addFamily(TEST_FAMILY_2);
           try (ResultScanner scanner1 = table.getScanner(s1);) {
+            scanner1.next();
           }
         }
         return null;
@@ -425,6 +427,7 @@ public class TestAccessController2 extends SecureTestUtil {
           Scan s1 = new Scan();
           s1.addColumn(TEST_FAMILY, Q2);
           try (ResultScanner scanner1 = table.getScanner(s1);) {
+            scanner1.next();
           }
         }
         return null;

