hbase-commits mailing list archives

From: st...@apache.org
Subject: hbase git commit: HBASE-13030 [1.0.0 polish] Make ScanMetrics public again and align Put 'add' with Get, Delete, etc., addColumn
Date: Fri, 13 Feb 2015 23:24:46 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 76a3b50f1 -> 3babad30e


HBASE-13030 [1.0.0 polish] Make ScanMetrics public again and align Put 'add' with Get, Delete,
etc., addColumn
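
In short: client code moves from Put#add to Put#addColumn, and from raw scan
attributes to Scan#setScanMetricsEnabled/Scan#getScanMetrics. A sketch of the new
surface (the table, row, family, qualifier and value names below are illustrative
placeholders, not part of the patch):

    Put put = new Put(row);
    put.addColumn(family, qualifier, value);  // was: put.add(family, qualifier, value)
    table.put(put);

    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);  // was: setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, ...)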


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3babad30
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3babad30
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3babad30

Branch: refs/heads/master
Commit: 3babad30e6d1953115f432532f85a143eaf431ad
Parents: 76a3b50
Author: stack <stack@apache.org>
Authored: Fri Feb 13 15:24:22 2015 -0800
Committer: stack <stack@apache.org>
Committed: Fri Feb 13 15:24:38 2015 -0800

----------------------------------------------------------------------
 .../hbase/client/AbstractClientScanner.java     | 15 ++++---
 .../hadoop/hbase/client/ClientScanner.java      |  6 +--
 .../org/apache/hadoop/hbase/client/Put.java     | 46 +++++++++++++++++++-
 .../org/apache/hadoop/hbase/client/Result.java  |  9 +++-
 .../org/apache/hadoop/hbase/client/Scan.java    | 42 +++++++++++++++++-
 .../hbase/client/metrics/ScanMetrics.java       | 11 ++---
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  4 ++
 .../hadoop/hbase/rest/client/RemoteHTable.java  | 23 +++++++++-
 .../hbase/mapreduce/TableRecordReaderImpl.java  | 11 ++---
 .../hbase/regionserver/RSRpcServices.java       |  1 -
 .../hadoop/hbase/ScanPerformanceEvaluation.java |  7 +--
 .../hadoop/hbase/client/TestFromClientSide.java | 39 +++++++++--------
 .../TestRegionObserverInterface.java            | 29 ++++++------
 13 files changed, 172 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index 54c97d7..dc325a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -23,14 +23,12 @@ import java.util.Iterator;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Helper class for custom client scanners.
  */
 @InterfaceAudience.Private
 public abstract class AbstractClientScanner implements ResultScanner {
-
   protected ScanMetrics scanMetrics;
 
   /**
@@ -38,14 +36,19 @@ public abstract class AbstractClientScanner implements ResultScanner {
    */
   protected void initScanMetrics(Scan scan) {
     // check if application wants to collect scan metrics
-    byte[] enableMetrics = scan.getAttribute(
-      Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
-    if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) {
+    if (scan.isScanMetricsEnabled()) {
       scanMetrics = new ScanMetrics();
     }
   }
 
-  // TODO: should this be at ResultScanner? ScanMetrics is not public API it seems.
+  /**
+   * Used internally to accumulate metrics on scan. To
+   * enable collection of metrics on a Scanner, call {@link Scan#setScanMetricsEnabled(boolean)}.
+   * These metrics are cleared at key transition points. Metrics are accumulated in the
+   * {@link Scan} object itself.
+   * @see Scan#getScanMetrics()
+   * @return Returns the running {@link ScanMetrics} instance or null if scan metrics are not enabled.
+   */
   public ScanMetrics getScanMetrics() {
     return scanMetrics;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index d31642a..110b039 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -317,9 +317,9 @@ public class ClientScanner extends AbstractClientScanner {
     * machine; for scan/map reduce scenarios, we will have multiple scans running at the same time.
     *
     * By default, scan metrics are disabled; if the application wants to collect them, this
-     * behavior can be turned on by calling calling:
-     *
-     * scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE))
+     * behavior can be turned on by calling {@link Scan#setScanMetricsEnabled(boolean)}.
+     *
+     * <p>This invocation clears the scan metrics. Metrics are aggregated in the Scan instance.
      */
     protected void writeScanMetrics() {
       if (this.scanMetrics == null || scanMetricsPublished) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index b9d652d..364783f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -137,9 +137,22 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
    * @param qualifier column qualifier
    * @param value column value
    * @return this
+   * @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], byte[])}
    */
+  @Deprecated
   public Put add(byte [] family, byte [] qualifier, byte [] value) {
-    return add(family, qualifier, this.ts, value);
+    return addColumn(family, qualifier, value);
+  }
+
+  /**
+   * Add the specified column and value to this Put operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @param value column value
+   * @return this
+   */
+  public Put addColumn(byte [] family, byte [] qualifier, byte [] value) {
+    return addColumn(family, qualifier, this.ts, value);
   }
 
   /**
@@ -167,8 +180,23 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
    * @param ts version timestamp
    * @param value column value
    * @return this
+   * @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], long, byte[])}
    */
+  @Deprecated
   public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) {
+    return addColumn(family, qualifier, ts, value);
+  }
+
+  /**
+   * Add the specified column and value, with the specified timestamp as
+   * its version to this Put operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @param ts version timestamp
+   * @param value column value
+   * @return this
+   */
+  public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) {
     if (ts < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
     }
@@ -199,7 +227,6 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
   * This expects that the underlying arrays won't change. It's intended
   * for usage internal to HBase and for advanced client applications.
    */
-  @SuppressWarnings("unchecked")
  public Put addImmutable(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tag) {
     List<Cell> list = getCellList(family);
     KeyValue kv = createPutKeyValue(family, qualifier, ts, value, tag);
@@ -233,8 +260,23 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
    * @param ts version timestamp
    * @param value column value
    * @return this
+   * @deprecated Since 1.0.0. Use {@link Put#addColumn(byte[], ByteBuffer, long, ByteBuffer)}
    */
+  @Deprecated
   public Put add(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
+    return addColumn(family, qualifier, ts, value);
+  }
+
+  /**
+   * Add the specified column and value, with the specified timestamp as
+   * its version to this Put operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @param ts version timestamp
+   * @param value column value
+   * @return this
+   */
+  public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
     if (ts < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
     }
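
All three deprecated Put#add overloads above delegate to an addColumn twin, so
migration is mechanical. A sketch (row, family, qualifier and value are byte[]
placeholders assumed in scope):

    long ts = System.currentTimeMillis();
    Put p = new Put(row);
    p.addColumn(family, qualifier, value);           // was p.add(family, qualifier, value)
    p.addColumn(family, qualifier, ts, value);       // was p.add(family, qualifier, ts, value)
    p.addColumn(family, ByteBuffer.wrap(qualifier),  // ByteBuffer overload, same rename
        ts, ByteBuffer.wrap(value));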

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index faef0d3..c418e47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -361,6 +361,9 @@ public class Result implements CellScannable, CellScanner {
 
   /**
    * Get the latest version of the specified column.
+   * Note: this call clones the value content of the hosting Cell. See
+   * {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} if you want to
+   * avoid the cloning.
    * @param family family name
    * @param qualifier column qualifier
    * @return value of latest version of column, null if none found
@@ -388,7 +391,8 @@ public class Result implements CellScannable, CellScanner {
     if (kv == null) {
       return null;
     }
-    return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
+    return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()).
+      asReadOnlyBuffer();
   }
 
   /**
@@ -411,7 +415,8 @@ public class Result implements CellScannable, CellScanner {
     if (kv == null) {
       return null;
     }
-    return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
+    return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()).
+      asReadOnlyBuffer();
   }
 
   /**
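
The two ByteBuffer accessors above now return read-only views, so callers can no
longer mutate a Cell's backing array through them. A sketch (table, row, family and
qualifier are placeholders):

    Result r = table.get(new Get(row));
    ByteBuffer vb = r.getValueAsByteBuffer(family, qualifier);
    byte first = vb.get();  // reading copies nothing
    // vb.put(0, (byte) 0) would now throw java.nio.ReadOnlyBufferException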

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index d2dd770..bfcfa20 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -34,9 +34,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -117,9 +119,18 @@ public class Scan extends Query {
   private int storeOffset = 0;
   private boolean getScan;
 
-  // If application wants to collect scan metrics, it needs to
-  // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
+  /**
+   * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
+   */
+  // Make private or remove.
+  @Deprecated
   static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
+
+  /**
+   * @deprecated since 1.0.0. Use {@link #getScanMetrics()}
+   */
+  // Make this private or remove.
+  @Deprecated
   static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
 
   // If an application wants to use multiple scans over different tables each scan must
@@ -916,4 +927,31 @@ public class Scan extends Query {
     scan.setCaching(1);
     return scan;
   }
+
+  /**
+   * Enable collection of {@link ScanMetrics}. For advanced users.
+   * @param enabled Set to true to enable accumulating scan metrics
+   */
+  public Scan setScanMetricsEnabled(final boolean enabled) {
+    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
+    return this;
+  }
+
+  /**
+   * @return True if collection of scan metrics is enabled. For advanced users.
+   */
+  public boolean isScanMetricsEnabled() {
+    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
+    return attr == null ? false : Bytes.toBoolean(attr);
+  }
+
+  /**
+   * @return Metrics on this Scan, if metrics were enabled.
+   * @see #setScanMetricsEnabled(boolean)
+   */
+  public ScanMetrics getScanMetrics() {
+    byte [] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
+    if (bytes == null) return null;
+    return ProtobufUtil.toScanMetrics(bytes);
+  }
 }
\ No newline at end of file
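
Taken together, the round trip for the newly public metrics looks like this (a
sketch; table is any org.apache.hadoop.hbase.client.Table handle):

    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);
    ResultScanner rs = table.getScanner(scan);
    for (Result r : rs) {
      // consume rows
    }
    rs.close();  // closing publishes the accumulated metrics back into 'scan'
    ScanMetrics metrics = scan.getScanMetrics();
    long regions = metrics.countOfRegions.get();
    long bytes = metrics.countOfBytesInResults.get();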

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
index 86bc120..35c6667 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java
@@ -22,15 +22,14 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 import com.google.common.collect.ImmutableMap;
 
 
 /**
- * Provides client-side metrics related to scan operations
+ * Provides client-side metrics related to scan operations.
  * The data can be passed to mapreduce framework or other systems.
  * We use atomic longs so that one thread can increment,
  * while another atomically resets to zero after the values are reported
@@ -40,12 +39,10 @@ import com.google.common.collect.ImmutableMap;
  * However, there is no need for this. So they are defined under scan operation
  * for now.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class ScanMetrics {
 
-
-  private static final Log LOG = LogFactory.getLog(ScanMetrics.class);
-
   /**
    * Hash to hold the String -> Atomic Long mappings.
    */
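
With the class public again, downstream code can also walk the counters generically
via the getMetricsMap() accessor this class already exposes:

    for (Map.Entry<String, Long> entry : scan.getScanMetrics().getMetricsMap().entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }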

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index fefe626..7b68eee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -61,6 +61,10 @@ public final class CellUtil {
       cell.getQualifierLength());
   }
 
+  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
+    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+  }
+
   public static ByteRange fillTagRange(Cell cell, ByteRange range) {
     return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index eb5f506..0300ea2 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -250,10 +250,12 @@ public class RemoteHTable implements Table {
     return TableName.valueOf(name);
   }
 
+  @Override
   public Configuration getConfiguration() {
     return conf;
   }
 
+  @Override
   public HTableDescriptor getTableDescriptor() throws IOException {
     StringBuilder sb = new StringBuilder();
     sb.append('/');
@@ -282,10 +284,12 @@ public class RemoteHTable implements Table {
     throw new IOException("schema request timed out");
   }
 
+  @Override
   public void close() throws IOException {
     client.shutdown();
   }
 
+  @Override
   public Result get(Get get) throws IOException {
     TimeRange range = get.getTimeRange();
     String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
@@ -304,6 +308,7 @@ public class RemoteHTable implements Table {
     }
   }
 
+  @Override
   public Result[] get(List<Get> gets) throws IOException {
     byte[][] rows = new byte[gets.size()][];
     int maxVersions = 1;
@@ -360,6 +365,7 @@ public class RemoteHTable implements Table {
     throw new IOException("get request timed out");
   }
 
+  @Override
   public boolean exists(Get get) throws IOException {
     LOG.warn("exists() is really get(), just use get()");
     Result result = get(get);
@@ -370,6 +376,7 @@ public class RemoteHTable implements Table {
    * exists(List) is really a list of get() calls. Just use get().
    * @param gets list of Get to test for the existence
    */
+  @Override
   public boolean[] existsAll(List<Get> gets) throws IOException {
     LOG.warn("exists(List<Get>) is really list of get() calls, just use get()");
     boolean[] results = new boolean[gets.size()];
@@ -389,6 +396,7 @@ public class RemoteHTable implements Table {
     return objectResults;
   }
 
+  @Override
   public void put(Put put) throws IOException {
     CellSetModel model = buildModelFromPut(put);
     StringBuilder sb = new StringBuilder();
@@ -417,6 +425,7 @@ public class RemoteHTable implements Table {
     throw new IOException("put request timed out");
   }
 
+  @Override
   public void put(List<Put> puts) throws IOException {
     // this is a trick: The gateway accepts multiple rows in a cell set and
     // ignores the row specification in the URI
@@ -472,6 +481,7 @@ public class RemoteHTable implements Table {
     throw new IOException("multiput request timed out");
   }
 
+  @Override
   public void delete(Delete delete) throws IOException {
     String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
       delete.getTimeStamp(), delete.getTimeStamp(), 1);
@@ -495,6 +505,7 @@ public class RemoteHTable implements Table {
     throw new IOException("delete request timed out");
   }
 
+  @Override
   public void delete(List<Delete> deletes) throws IOException {
     for (Delete delete: deletes) {
       delete(delete);
@@ -632,19 +643,21 @@ public class RemoteHTable implements Table {
         LOG.warn(StringUtils.stringifyException(e));
       }
     }
-
   }
 
+  @Override
   public ResultScanner getScanner(Scan scan) throws IOException {
     return new Scanner(scan);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family) throws IOException {
     Scan scan = new Scan();
     scan.addFamily(family);
     return new Scanner(scan);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family, byte[] qualifier)
       throws IOException {
     Scan scan = new Scan();
@@ -660,6 +673,7 @@ public class RemoteHTable implements Table {
     throw new IOException("getRowOrBefore not supported");
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Put put) throws IOException {
     // column to check-the-value
@@ -696,11 +710,13 @@ public class RemoteHTable implements Table {
     throw new IOException("checkAndPut request timed out");
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Put put) throws IOException {
     throw new IOException("checkAndPut for non-equal comparison not implemented");
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Delete delete) throws IOException {
     Put put = new Put(row);
@@ -737,24 +753,29 @@ public class RemoteHTable implements Table {
     throw new IOException("checkAndDelete request timed out");
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Delete delete) throws IOException {
     throw new IOException("checkAndDelete for non-equal comparison not implemented");
   }
 
+  @Override
   public Result increment(Increment increment) throws IOException {
     throw new IOException("Increment not supported");
   }
 
+  @Override
   public Result append(Append append) throws IOException {
     throw new IOException("Append not supported");
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
       long amount) throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
       long amount, Durability durability) throws IOException {
     throw new IOException("incrementColumnValue not supported");

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 47f6869..06fa712 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -80,8 +79,7 @@ public class TableRecordReaderImpl {
   public void restart(byte[] firstRow) throws IOException {
     currentScan = new Scan(scan);
     currentScan.setStartRow(firstRow);
-    currentScan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE,
-      Bytes.toBytes(Boolean.TRUE));
+    currentScan.setScanMetricsEnabled(true);
     if (this.scanner != null) {
       if (logScannerActivity) {
         LOG.info("Closing the previously opened scanner object.");
@@ -265,14 +263,11 @@ public class TableRecordReaderImpl {
    * @throws IOException
    */
   private void updateCounters() throws IOException {
-    byte[] serializedMetrics = currentScan.getAttribute(
-        Scan.SCAN_ATTRIBUTES_METRICS_DATA);
-    if (serializedMetrics == null || serializedMetrics.length == 0 ) {
+    ScanMetrics scanMetrics = this.currentScan.getScanMetrics();
+    if (scanMetrics == null) {
       return;
     }
 
-    ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
-
     updateCounters(scanMetrics, numRestarts, getCounter, context, numStale);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 3944ae8..62c3305 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2073,7 +2073,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         if (!isLoadingCfsOnDemandSet) {
           scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
         }
-        scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
         region.prepareScanner(scan);
         if (region.getCoprocessorHost() != null) {
           scanner = region.getCoprocessorHost().preScannerOpen(scan);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
index 0c331b7..24e9590 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -39,9 +38,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Counters;
@@ -137,7 +134,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
     Scan scan = new Scan(); // default scan settings
     scan.setCacheBlocks(false);
     scan.setMaxVersions(1);
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scan.setScanMetricsEnabled(true);
     if (caching != null) {
       scan.setCaching(Integer.parseInt(caching));
     }
@@ -177,7 +174,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
     table.close();
     connection.close();
 
-    ScanMetrics metrics = ProtobufUtil.toScanMetrics(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    ScanMetrics metrics = scan.getScanMetrics();
     long totalBytes = metrics.countOfBytesInResults.get();
     double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
     double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c77ab29..49a6036 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -5003,37 +5003,39 @@ public class TestFromClientSide {
 
     Scan scan1 = new Scan();
     int numRecords = 0;
-    for(Result result : ht.getScanner(scan1)) {
+    ResultScanner scanner = ht.getScanner(scan1);
+    for(Result result : scanner) {
       numRecords++;
     }
+    scanner.close();
     LOG.info("test data has " + numRecords + " records.");
 
     // by default, scan metrics collection is turned off
-    assertEquals(null, scan1.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    assertEquals(null, scan1.getScanMetrics());
 
     // turn on scan metrics
-    Scan scan = new Scan();
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
-    scan.setCaching(numRecords+1);
-    ResultScanner scanner = ht.getScanner(scan);
+    Scan scan2 = new Scan();
+    scan2.setScanMetricsEnabled(true);
+    scan2.setCaching(numRecords+1);
+    scanner = ht.getScanner(scan2);
     for (Result result : scanner.next(numRecords - 1)) {
     }
     scanner.close();
     // closing the scanner will set the metrics.
-    assertNotNull(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
+    assertNotNull(scan2.getScanMetrics());
 
-    // set caching to 1, becasue metrics are collected in each roundtrip only
-    scan = new Scan();
-    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
-    scan.setCaching(1);
-    scanner = ht.getScanner(scan);
+    // set caching to 1, because metrics are collected in each roundtrip only
+    scan2 = new Scan();
+    scan2.setScanMetricsEnabled(true);
+    scan2.setCaching(1);
+    scanner = ht.getScanner(scan2);
     // per HBASE-5717, this should still collect even if you don't run all the way to
     // the end of the scanner. So this is asking for 2 of the 3 rows we inserted.
     for (Result result : scanner.next(numRecords - 1)) {
     }
     scanner.close();
 
-    ScanMetrics scanMetrics = getScanMetrics(scan);
+    ScanMetrics scanMetrics = scan2.getScanMetrics();
     assertEquals("Did not access all the regions in the table", numOfRegions,
         scanMetrics.countOfRegions.get());
 
@@ -5041,7 +5043,7 @@ public class TestFromClientSide {
     // run past the end of all the records
     Scan scanWithoutClose = new Scan();
     scanWithoutClose.setCaching(1);
-    scanWithoutClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scanWithoutClose.setScanMetricsEnabled(true);
     ResultScanner scannerWithoutClose = ht.getScanner(scanWithoutClose);
     for (Result result : scannerWithoutClose.next(numRecords + 1)) {
     }
@@ -5054,7 +5056,7 @@ public class TestFromClientSide {
     Scan scanWithClose = new Scan();
     // make sure we can set caching up to the number of scanned values
     scanWithClose.setCaching(numRecords);
-    scanWithClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
+    scanWithClose.setScanMetricsEnabled(true);
     ResultScanner scannerWithClose = ht.getScanner(scanWithClose);
     for (Result result : scannerWithClose.next(numRecords + 1)) {
     }
@@ -5068,7 +5070,6 @@ public class TestFromClientSide {
     byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
     assertTrue("Serialized metrics were not found.", serializedMetrics != null);
 
-
     ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
 
     return scanMetrics;
@@ -5209,10 +5210,10 @@ public class TestFromClientSide {
       // Verify region location before move.
       HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
       HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(),  true);
-  
+
       assertEquals(addrBefore.getPort(), addrCache.getPort());
       assertEquals(addrBefore.getPort(), addrNoCache.getPort());
-  
+
       ServerName addrAfter = null;
       // Now move the region to a different server.
       for (int i = 0; i < SLAVES; i++) {
@@ -5227,7 +5228,7 @@ public class TestFromClientSide {
           break;
         }
       }
-  
+
       // Verify the region was moved.
       addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
       addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3babad30/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 6895fbe..bf5227b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -112,7 +112,7 @@ public class TestRegionObserverInterface {
     util.shutdownMiniCluster();
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testRegionObserver() throws IOException {
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
     // recreate table every time in order to reset the status of the
@@ -176,7 +176,7 @@ public class TestRegionObserverInterface {
         new Integer[] {1, 1, 1, 1});
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testRowMutation() throws IOException {
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
     Table table = util.createTable(tableName, new byte[][] {A, B, C});
@@ -213,7 +213,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testIncrementHook() throws IOException {
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
     Table table = util.createTable(tableName, new byte[][] {A, B, C});
@@ -240,7 +240,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testCheckAndPutHooks() throws IOException {
     TableName tableName =
         TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
@@ -268,7 +268,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testCheckAndDeleteHooks() throws IOException {
     TableName tableName =
         TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
@@ -298,7 +298,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testAppendHook() throws IOException {
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
     Table table = util.createTable(tableName, new byte[][] {A, B, C});
@@ -325,7 +325,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   // HBase-3583
   public void testHBase3583() throws IOException {
     TableName tableName =
@@ -377,7 +377,7 @@ public class TestRegionObserverInterface {
     table.close();
   }
 
-  @Test
+  @Test (timeout=300000)
   // HBase-3758
   public void testHBase3758() throws IOException {
     TableName tableName =
@@ -483,7 +483,7 @@ public class TestRegionObserverInterface {
    * Tests overriding compaction handling via coprocessor hooks
    * @throws Exception
    */
-  @Test
+  @Test (timeout=300000)
   public void testCompactionOverride() throws Exception {
     TableName compactTable = TableName.valueOf("TestCompactionOverride");
     Admin admin = util.getHBaseAdmin();
@@ -554,7 +554,7 @@ public class TestRegionObserverInterface {
     table.close();
   }
 
-  @Test
+  @Test (timeout=300000)
   public void bulkLoadHFileTest() throws Exception {
     String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest";
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
@@ -587,7 +587,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testRecovery() throws Exception {
     LOG.info(TestRegionObserverInterface.class.getName() +".testRecovery");
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
@@ -637,7 +637,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testLegacyRecovery() throws Exception {
     LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery");
     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testLegacyRecovery");
@@ -687,7 +687,7 @@ public class TestRegionObserverInterface {
     }
   }
 
-  @Test
+  @Test (timeout=300000)
   public void testPreWALRestoreSkip() throws Exception {
     LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
     TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
@@ -772,5 +772,4 @@ public class TestRegionObserverInterface {
       writer.close();
     }
   }
-
-}
+}
\ No newline at end of file

