hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mbau...@apache.org
Subject svn commit: r1401513 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/io/encoding/ main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/metrics/ test/java/org/apache/hadoop/hbase/ test/java/org/a...
Date Tue, 23 Oct 2012 23:18:59 GMT
Author: mbautin
Date: Tue Oct 23 23:18:59 2012
New Revision: 1401513

URL: http://svn.apache.org/viewvc?rev=1401513&view=rev
Log:
[jira] [HBASE-6983] [89-fb] Metric for unencoded size of cached blocks

Author: mbautin

Summary: We need to measure the amount of unencoded data in the block cache when data block encoding is enabled.

Test Plan: Unit tests

Reviewers: kannan, kranganathan, liyintang, aaiyer, mcorgan

Reviewed By: kannan

Differential Revision: https://reviews.facebook.net/D5979

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java Tue Oct 23 23:18:59 2012
@@ -21,7 +21,6 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.KeyValue.SamePrefixComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
@@ -29,6 +28,8 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableUtils;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Base class for all data block encoders that use a buffer.
  */
@@ -184,6 +185,11 @@ abstract class BufferedDataBlockEncoder 
   public abstract BufferedEncodedWriter createWriter(DataOutputStream out,
       boolean includesMemstoreTS) throws IOException;
 
+  @Override
+  public int getUnencodedSize(ByteBuffer bufferWithoutHeader) {
+    return bufferWithoutHeader.getInt(DataBlockEncoding.ID_SIZE);
+  }
+
   protected static class SeekerState {
     protected int valueOffset = -1;
     protected int keyLength;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java Tue Oct 23 23:18:59 2012
@@ -40,6 +40,11 @@ public class CopyKeyDataBlockEncoder ext
   }
 
   @Override
+  public int getUnencodedSize(ByteBuffer bufferWithoutHeader) {
+    return bufferWithoutHeader.capacity();
+  }
+
+  @Override
   public ByteBuffer decodeKeyValues(DataInputStream source,
       int preserveHeaderLength, boolean includesMemstoreTS, int totalEncodedSize)
       throws IOException {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java Tue Oct 23 23:18:59 2012
@@ -80,6 +80,9 @@ public interface DataBlockEncoder {
   public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
       boolean includesMemstoreTS);
 
+  /** @return unencoded size of the given encoded block or -1 if unknown */
+  public int getUnencodedSize(ByteBuffer bufferWithoutHeader);
+
   /**
    * Create an incremental writer
    * @param out Where to write encoded data

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java Tue Oct 23 23:18:59 2012
@@ -109,6 +109,11 @@ public interface BlockCache {
   public long getBlockCount();
 
   /**
+   * Clear the cache. Used in unit tests. Don't call this in production.
+   */
+  public void clearCache();
+
+  /**
    * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects.
    * This method could be fairly heavyweight in that it evaluates the entire HBase file-system
    * against what is in the RegionServer BlockCache.

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java Tue Oct 23 23:18:59 2012
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Clas
 public class CachedBlock implements HeapSize, Comparable<CachedBlock> {
 
   public final static long PER_BLOCK_OVERHEAD = ClassSize.align(
-    ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) +
+    ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + 3 * Bytes.SIZEOF_LONG +
     ClassSize.STRING + ClassSize.BYTE_BUFFER);
 
   static enum BlockPriority {
@@ -56,12 +56,9 @@ public class CachedBlock implements Heap
   private final Cacheable buf;
   private volatile long accessTime;
   private long size;
+  private long unencodedSize;
   private BlockPriority priority;
 
-  public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) {
-    this(cacheKey, buf, accessTime, false);
-  }
-
   public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
       boolean inMemory) {
     this.cacheKey = cacheKey;
@@ -72,8 +69,12 @@ public class CachedBlock implements Heap
     // the base classes. We also include the base class
     // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
     // their buffer lengths. This variable is used elsewhere in unit tests.
-    this.size = ClassSize.align(cacheKey.heapSize())
-        + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
+    // We assume that block size is under 2 GB.
+
+    long keyAndBlockOverhead = ClassSize.align(cacheKey.heapSize()) + PER_BLOCK_OVERHEAD;
+    this.size = keyAndBlockOverhead + ClassSize.align(buf.heapSize());
+    this.unencodedSize = keyAndBlockOverhead + ClassSize.align(HFileBlock.getUnencodedSize(buf));
+
     if(inMemory) {
       this.priority = BlockPriority.MEMORY;
     } else {
@@ -111,4 +112,8 @@ public class CachedBlock implements Heap
   public BlockPriority getPriority() {
     return this.priority;
   }
+
+  public long getUnencodedSize() {
+    return unencodedSize;
+  }
 }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java Tue Oct 23 23:18:59 2012
@@ -53,6 +53,8 @@ import org.apache.hadoop.io.compress.Com
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Reading {@link HFile} version 1 and 2 blocks, and writing version 2 blocks.
  * <ul>
@@ -74,7 +76,6 @@ import org.apache.hadoop.io.compress.Dec
  * compression algorithm is the same for all the blocks in the {@link HFile},
  * similarly to what was done in version 1.
  * </ul>
- * </ul>
  * The version 2 block representation in the block cache is the same as above,
  * except that the data section is always uncompressed in the cache.
  */
@@ -87,13 +88,6 @@ public class HFileBlock extends SchemaCo
   public static final int HEADER_SIZE = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT
       + Bytes.SIZEOF_LONG;
 
-  /**
-   * The size of block header when blockType is {@link BlockType#ENCODED_DATA}.
-   * This extends normal header by adding the id of encoder.
-   */
-  public static final int ENCODED_HEADER_SIZE = HEADER_SIZE
-      + DataBlockEncoding.ID_SIZE;
-
   /** Just an array of bytes of the right size. */
   public static final byte[] DUMMY_HEADER = new byte[HEADER_SIZE];
 
@@ -104,6 +98,23 @@ public class HFileBlock extends SchemaCo
   private static final AtomicLong numSeekRead = new AtomicLong();
   private static final AtomicLong numPositionalRead = new AtomicLong();
 
+  private static final int HFILE_BLOCK_OVERHEAD = ClassSize.align(
+      // Base class size, including object overhead.
+      SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
+
+      // Block type and byte buffer references
+      2 * ClassSize.REFERENCE +
+
+      // On-disk size, uncompressed size, and next block's on-disk size
+      3 * Bytes.SIZEOF_INT +
+
+      // This and previous block offset
+      2 * Bytes.SIZEOF_LONG +
+
+      // "Include memstore timestamp" flag
+      Bytes.SIZEOF_BOOLEAN
+  );
+
   // Instance variables
   private BlockType blockType;
   private int onDiskSizeWithoutHeader;
@@ -164,8 +175,8 @@ public class HFileBlock extends SchemaCo
    * buffer position, but if you slice the buffer beforehand, it will rewind
    * to that point.
    *
-   * @param b
-   * @return
+   * @param b bytes to include in the block
+   * @return the new block
    * @throws IOException
    */
   private HFileBlock(ByteBuffer b) throws IOException {
@@ -403,18 +414,7 @@ public class HFileBlock extends SchemaCo
 
   @Override
   public long heapSize() {
-    long size = ClassSize.align(
-        // Base class size, including object overhead.
-        SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
-        // Block type and byte buffer references
-        2 * ClassSize.REFERENCE +
-        // On-disk size, uncompressed size, and next block's on-disk size
-        3 * Bytes.SIZEOF_INT +
-        // This and previous block offset
-        2 * Bytes.SIZEOF_LONG +
-        // "Include memstore timestamp" flag
-        Bytes.SIZEOF_BOOLEAN
-    );
+    long size = HFILE_BLOCK_OVERHEAD;
 
     if (buf != null) {
       // Deep overhead of the byte buffer. Needs to be aligned separately.
@@ -471,6 +471,31 @@ public class HFileBlock extends SchemaCo
   }
 
   /**
+   * Used in maintaining total unencoded size stats in the block cache.
+   * @return unencoded size of the given cache block
+   */
+  public static long getUnencodedSize(Cacheable buf) {
+    Preconditions.checkNotNull(buf);
+
+    if (buf instanceof HFileBlock) {
+      HFileBlock b = (HFileBlock) buf;
+      if (b.blockType == BlockType.ENCODED_DATA) {
+        short encodingId = b.getDataBlockEncodingId();
+        DataBlockEncoding encoding = DataBlockEncoding.getEncodingById(encodingId);
+        Preconditions.checkNotNull(encoding);
+        DataBlockEncoder encoder = encoding.getEncoder();
+        Preconditions.checkNotNull(encoder);
+        int unencodedSize = encoder.getUnencodedSize(b.getBufferWithoutHeader());
+        Preconditions.checkState(unencodedSize >= -1);
+        if (unencodedSize >= 0) {
+          return ClassSize.align(HFILE_BLOCK_OVERHEAD + HEADER_SIZE + unencodedSize);
+        }
+      }
+    }
+    return buf.heapSize();
+  }
+
+  /**
    * Unified version 2 {@link HFile} block writer. The intended usage pattern
    * is as follows:
    * <ul>
@@ -479,8 +504,8 @@ public class HFileBlock extends SchemaCo
    * <li>Call {@link Writer#startWriting(BlockType)} and get a data stream to
    * write to
    * <li>Write your data into the stream
-   * <li>Call {@link Writer#writeHeaderAndData} as many times as you need to
-   * store the serialized block into an external stream, or call
+   * <li>Call {@link Writer#writeHeaderAndData(java.io.DataOutputStream)} as many times as you
+   * need to store the serialized block into an external stream, or call
    * {@link Writer#getHeaderAndData()} to get it as a byte array.
    * <li>Repeat to write more blocks
    * </ul>

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Tue Oct 23 23:18:59 2012
@@ -138,7 +138,7 @@ public class LruBlockCache implements Bl
         .setDaemon(true)
         .build());
 
-  /** Current size of cache */
+  /** Current size of cache in bytes */
   private final AtomicLong size;
 
   /** Current number of cached elements */
@@ -335,17 +335,20 @@ public class LruBlockCache implements Bl
    * @param evict
    */
   protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
-    long heapsize = cb.heapSize();
+    long heapSizeDelta = cb.heapSize();
+    long unencodedSizeDelta = cb.getUnencodedSize();
     if (evict) {
-      heapsize *= -1;
+      heapSizeDelta *= -1;
+      unencodedSizeDelta *= -1;
     }
     Cacheable cachedBlock = cb.getBuffer();
     SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
     if (schemaMetrics != null) {
       schemaMetrics.updateOnCachePutOrEvict(
-          cachedBlock.getBlockType().getCategory(), heapsize);
+          cachedBlock.getBlockType().getCategory(), heapSizeDelta,
+          unencodedSizeDelta);
     }
-    return size.addAndGet(heapsize);
+    return size.addAndGet(heapSizeDelta);
   }
 
   /**
@@ -918,6 +921,7 @@ public class LruBlockCache implements Bl
   }
 
   /** Clears the cache. Updates per-block-category counts accordingly. Used in tests. */
+  @Override
   public void clearCache() {
     map.clear();
     SchemaMetrics.clearBlockCacheMetrics();

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java Tue Oct 23 23:18:59 2012
@@ -136,5 +136,10 @@ public class SimpleBlockCache implements
     // TODO: implement this if we ever actually use this block cache
     return 0;
   }
+
+  @Override
+  public void clearCache() {
+    cache.clear();
+  }
 }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java Tue Oct 23 23:18:59 2012
@@ -30,9 +30,11 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang.mutable.MutableDouble;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -106,27 +108,32 @@ public class SchemaMetrics {
 
   private static final Log LOG = LogFactory.getLog(SchemaMetrics.class);
 
+  private static final int COMPACTION_AWARE_METRIC_FLAG = 0x01;
+  private static final int TIME_VARYING_METRIC_FLAG = 0x02;
+  private static final int PERSISTENT_METRIC_FLAG = 0x04;
+
   public static enum BlockMetricType {
-    // Metric configuration: compactionAware, timeVarying
-    READ_TIME("Read",                   true, true),
-    READ_COUNT("BlockReadCnt",          true, false),
-    CACHE_HIT("BlockReadCacheHitCnt",   true, false),
-    CACHE_MISS("BlockReadCacheMissCnt", true, false),
-
-    CACHE_SIZE("blockCacheSize",        false, false),
-    CACHE_NUM_BLOCKS("cacheNumBlocks",  false, false), 
-    CACHED("blockCacheNumCached",       false, false),
-    EVICTED("blockCacheNumEvicted",     false, false);
+    READ_TIME("Read", COMPACTION_AWARE_METRIC_FLAG | TIME_VARYING_METRIC_FLAG),
+    READ_COUNT("BlockReadCnt", COMPACTION_AWARE_METRIC_FLAG),
+    CACHE_HIT("BlockReadCacheHitCnt", COMPACTION_AWARE_METRIC_FLAG),
+    CACHE_MISS("BlockReadCacheMissCnt", COMPACTION_AWARE_METRIC_FLAG),
+
+    CACHE_SIZE("blockCacheSize", PERSISTENT_METRIC_FLAG),
+    UNENCODED_CACHE_SIZE("blockCacheUnencodedSize", PERSISTENT_METRIC_FLAG),
+    CACHE_NUM_BLOCKS("cacheNumBlocks", PERSISTENT_METRIC_FLAG),
+    CACHED("blockCacheNumCached"),
+    EVICTED("blockCacheNumEvicted");
 
     private final String metricStr;
-    private final boolean compactionAware;
-    private final boolean timeVarying;
+    private final int flags;
+
+    BlockMetricType(String metricStr) {
+      this(metricStr, 0);
+    }
 
-    BlockMetricType(String metricStr, boolean compactionAware,
-          boolean timeVarying) {
+    BlockMetricType(String metricStr, int flags) {
       this.metricStr = metricStr;
-      this.compactionAware = compactionAware;
-      this.timeVarying = timeVarying;
+      this.flags = flags;
     }
 
     @Override
@@ -144,6 +151,18 @@ public class SchemaMetrics {
       }
       BLOCK_METRIC_TYPE_RE = sb.toString();
     }
+
+    final boolean compactionAware() {
+      return (flags & COMPACTION_AWARE_METRIC_FLAG) != 0;
+    }
+
+    private final boolean timeVarying() {
+      return (flags & TIME_VARYING_METRIC_FLAG) != 0;
+    }
+
+    final boolean persistent() {
+      return (flags & PERSISTENT_METRIC_FLAG) != 0;
+    }
   };
 
   public static enum StoreMetricType {
@@ -153,18 +172,28 @@ public class SchemaMetrics {
     STATIC_BLOOM_SIZE_KB("staticBloomSizeKB"),
     MEMSTORE_SIZE_MB("memstoreSizeMB"),
     STATIC_INDEX_SIZE_KB("staticIndexSizeKB"),
-    FLUSH_SIZE("flushSize");
+    FLUSH_SIZE("flushSize", PERSISTENT_METRIC_FLAG);
 
     private final String metricStr;
+    private final int flags;
 
-    StoreMetricType(String metricStr) {
+    private StoreMetricType(String metricStr) {
+      this(metricStr, 0);
+    }
+
+    private StoreMetricType(String metricStr, int flags) {
       this.metricStr = metricStr;
+      this.flags = flags;
     }
 
     @Override
     public String toString() {
       return metricStr;
     }
+
+    private final boolean persistent() {
+      return (flags & PERSISTENT_METRIC_FLAG) != 0;
+    }
   };
 
   // Constants
@@ -175,6 +204,7 @@ public class SchemaMetrics {
    * per-CF/table metrics.
    */
   public static final String UNKNOWN = "__unknown";
+
   public static final String TABLE_PREFIX = "tbl.";
   public static final String CF_PREFIX = "cf.";
   public static final String BLOCK_TYPE_PREFIX = "bt.";
@@ -217,13 +247,18 @@ public class SchemaMetrics {
   private static final String SHOW_TABLE_NAME_CONF_KEY =
       "hbase.metrics.showTableName";
 
+  private static final String WORD_BOUNDARY_RE_STR = "\\b";
+
+  private static final String COMPACTION_METRIC_PREFIX = "compaction";
+  private static final String NON_COMPACTION_METRIC_PREFIX = "fs";
+
   // Global variables
   /**
    * Maps a string key consisting of table name and column family name, with
    * table name optionally replaced with {@link #TOTAL_KEY} if per-table
    * metrics are disabled, to an instance of this class.
    */
-  private static final ConcurrentHashMap<String, SchemaMetrics>
+  private static final ConcurrentMap<String, SchemaMetrics>
       tableAndFamilyToMetrics = new ConcurrentHashMap<String, SchemaMetrics>();
 
   /** Metrics for all tables and column families. */
@@ -231,6 +266,23 @@ public class SchemaMetrics {
   public static final SchemaMetrics ALL_SCHEMA_METRICS =
     getInstance(TOTAL_KEY, TOTAL_KEY);
 
+  private static final Pattern PERSISTENT_METRIC_RE;
+  static {
+    StringBuilder sb = new StringBuilder();
+    for (BlockMetricType bmt : BlockMetricType.values()) {
+      if (bmt.persistent()) {
+        sb.append((sb.length() == 0 ? "" : "|") + bmt);
+      }
+    }
+    for (StoreMetricType smt : StoreMetricType.values()) {
+      if (smt.persistent()) {
+        sb.append((sb.length() == 0 ? "" : "|") + smt);
+      }
+    }
+    PERSISTENT_METRIC_RE = Pattern.compile(".*" + WORD_BOUNDARY_RE_STR +
+        "(" + META_BLOCK_CATEGORY_STR + ")?(" + sb + ")$");
+  }
+
   /**
    * Whether to include table name in metric names. If this is null, it has not
    * been initialized. This is a global instance, but we also have a copy of it
@@ -257,7 +309,7 @@ public class SchemaMetrics {
     for (BlockCategory blockCategory : BlockCategory.values()) {
       for (boolean isCompaction : BOOL_VALUES) {
         for (BlockMetricType metricType : BlockMetricType.values()) {
-          if (!metricType.compactionAware && isCompaction) {
+          if (!metricType.compactionAware() && isCompaction) {
             continue;
           }
 
@@ -270,8 +322,8 @@ public class SchemaMetrics {
             sb.append(BLOCK_TYPE_PREFIX + categoryStr + ".");
           }
 
-          if (metricType.compactionAware) {
-            sb.append(isCompaction ? "compaction" : "fs");
+          if (metricType.compactionAware()) {
+            sb.append(isCompaction ? COMPACTION_METRIC_PREFIX : NON_COMPACTION_METRIC_PREFIX);
           }
 
           // A special-case for meta blocks for backwards-compatibility.
@@ -283,7 +335,7 @@ public class SchemaMetrics {
 
           int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
           blockMetricNames[i] = sb.toString().intern();
-          blockMetricTimeVarying[i] = metricType.timeVarying;
+          blockMetricTimeVarying[i] = metricType.timeVarying();
         }
       }
     }
@@ -342,7 +394,7 @@ public class SchemaMetrics {
 
   public String getBlockMetricName(BlockCategory blockCategory,
       boolean isCompaction, BlockMetricType metricType) {
-    if (isCompaction && !metricType.compactionAware) {
+    if (isCompaction && !metricType.compactionAware()) {
       throw new IllegalArgumentException("isCompaction cannot be true for "
           + metricType);
     }
@@ -430,6 +482,7 @@ public class SchemaMetrics {
    */
   public void updatePersistentStoreMetric(StoreMetricType storeMetricType,
       long value) {
+    Preconditions.checkArgument(storeMetricType.persistent());
     HRegion.incrNumericPersistentMetric(
         storeMetricNames[storeMetricType.ordinal()], value);
   }
@@ -471,17 +524,20 @@ public class SchemaMetrics {
    * metric is "persistent", i.e. it does not get reset when metrics are
    * collected.
    */
-  private void addToCacheSize(BlockCategory category, long cacheSizeDelta) {
+  private void addToCacheSize(BlockCategory category, long cacheSizeDelta,
+      long unencodedCacheSizeDelta) {
     if (category == null) {
       category = BlockCategory.ALL_CATEGORIES;
     }
     HRegion.incrNumericPersistentMetric(getBlockMetricName(category, DEFAULT_COMPACTION_FLAG,
         BlockMetricType.CACHE_SIZE), cacheSizeDelta);
     HRegion.incrNumericPersistentMetric(getBlockMetricName(category, DEFAULT_COMPACTION_FLAG,
+        BlockMetricType.UNENCODED_CACHE_SIZE), unencodedCacheSizeDelta);
+    HRegion.incrNumericPersistentMetric(getBlockMetricName(category, DEFAULT_COMPACTION_FLAG,
         BlockMetricType.CACHE_NUM_BLOCKS), cacheSizeDelta > 0 ? 1 : -1);
 
     if (category != BlockCategory.ALL_CATEGORIES) {
-      addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta);
+      addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta, unencodedCacheSizeDelta);
     }
   }
 
@@ -490,14 +546,19 @@ public class SchemaMetrics {
    * and all table/CFs (by calling the same method on {@link #ALL_SCHEMA_METRICS}), both the given
    * block category and all block categories aggregated, and the given block size.
    * @param blockCategory block category, e.g. index or data
-   * @param cacheSizeDelta the size of the block being cached (positive) or evicted (negative) 
-   */
-  public void updateOnCachePutOrEvict(BlockCategory blockCategory, long cacheSizeDelta) {
-    addToCacheSize(blockCategory, cacheSizeDelta);
+   * @param cacheSizeDelta the size of the block being cached (positive) or evicted (negative)
+   * @param unencodedCacheSizeDelta the amount to add to unencoded cache size. Must have the same
+   *                                sign as cacheSizeDelta.
+   */
+  public void updateOnCachePutOrEvict(BlockCategory blockCategory, long cacheSizeDelta,
+      long unencodedCacheSizeDelta) {
+    Preconditions.checkState((cacheSizeDelta > 0) == (unencodedCacheSizeDelta > 0));
+    addToCacheSize(blockCategory, cacheSizeDelta, unencodedCacheSizeDelta);
     incrNumericMetric(blockCategory, DEFAULT_COMPACTION_FLAG,
         cacheSizeDelta > 0 ? BlockMetricType.CACHED : BlockMetricType.EVICTED);
     if (this != ALL_SCHEMA_METRICS) {
-      ALL_SCHEMA_METRICS.updateOnCachePutOrEvict(blockCategory, cacheSizeDelta);
+      ALL_SCHEMA_METRICS.updateOnCachePutOrEvict(blockCategory, cacheSizeDelta,
+          unencodedCacheSizeDelta);
     }
   }
 
@@ -638,22 +699,31 @@ public class SchemaMetrics {
 
   /** "tab.<table_name>." */
   private static final String TABLE_NAME_RE_STR =
-      "\\b" + regexEscape(TABLE_PREFIX) + WORD_AND_DOT_RE_STR;
+      WORD_BOUNDARY_RE_STR + regexEscape(TABLE_PREFIX) + WORD_AND_DOT_RE_STR;
 
   /** "cf.<cf_name>." */
   private static final String CF_NAME_RE_STR =
-      "\\b" + regexEscape(CF_PREFIX) + WORD_AND_DOT_RE_STR;
+      WORD_BOUNDARY_RE_STR + regexEscape(CF_PREFIX) + WORD_AND_DOT_RE_STR;
   private static final Pattern CF_NAME_RE = Pattern.compile(CF_NAME_RE_STR);
 
   /** "tab.<table_name>.cf.<cf_name>." */
   private static final Pattern TABLE_AND_CF_NAME_RE = Pattern.compile(
       TABLE_NAME_RE_STR + CF_NAME_RE_STR);
 
-  private static final Pattern BLOCK_CATEGORY_RE = Pattern.compile(
-      "\\b" + regexEscape(BLOCK_TYPE_PREFIX) + "[^.]+\\." +
+  private static final String COMPACTION_PREFIX_RE_STR = "(" +
+      NON_COMPACTION_METRIC_PREFIX + "|" + COMPACTION_METRIC_PREFIX + ")?";
+
+  static final Pattern BLOCK_CATEGORY_RE = Pattern.compile(
+      "(" +
+      WORD_BOUNDARY_RE_STR +
+      regexEscape(BLOCK_TYPE_PREFIX) + "[^.]+\\." +
       // Also remove the special-case block type marker for meta blocks
-      "|" + META_BLOCK_CATEGORY_STR + "(?=" +
-      BlockMetricType.BLOCK_METRIC_TYPE_RE + ")");
+      "|" +
+      // Note we are not using word boundary here because "fs" or "compaction" may precede "Meta"
+      META_BLOCK_CATEGORY_STR +
+      ")" +
+      // Positive lookahead for block metric types. Needed for both meta and non-meta metrics.
+      "(?=" + COMPACTION_PREFIX_RE_STR + "(" + BlockMetricType.BLOCK_METRIC_TYPE_RE + "))");
 
   /**
    * A suffix for the "number of operations" part of "time-varying metrics". We
@@ -701,11 +771,15 @@ public class SchemaMetrics {
     return allMetricNames;
   }
 
-  private static final boolean isTimeVaryingKey(String metricKey) {
+  private static final boolean isTimeVaryingMetricKey(String metricKey) {
     return metricKey.endsWith(NUM_OPS_SUFFIX)
         || metricKey.endsWith(TOTAL_SUFFIX);
   }
 
+  static final boolean isPersistentMetricKey(String metricKey) {
+    return PERSISTENT_METRIC_RE.matcher(metricKey).matches();
+  }
+
   private static final String stripTimeVaryingSuffix(String metricKey) {
     return TIME_VARYING_SUFFIX_RE.matcher(metricKey).replaceAll("");
   }
@@ -715,11 +789,13 @@ public class SchemaMetrics {
     for (SchemaMetrics cfm : tableAndFamilyToMetrics.values()) {
       for (String metricName : cfm.getAllMetricNames()) {
         long metricValue;
-        if (isTimeVaryingKey(metricName)) {
+        if (isTimeVaryingMetricKey(metricName)) {
           Pair<Long, Integer> totalAndCount =
               HRegion.getTimeVaryingMetric(stripTimeVaryingSuffix(metricName));
           metricValue = metricName.endsWith(TOTAL_SUFFIX) ?
               totalAndCount.getFirst() : totalAndCount.getSecond();
+        } else if (isPersistentMetricKey(metricName)) {
+          metricValue = HRegion.getNumericPersistentMetric(metricName);
         } else {
           metricValue = HRegion.getNumericMetric(metricName);
         }
@@ -735,6 +811,10 @@ public class SchemaMetrics {
     return l != null ? l : 0;
   }
 
+  private static void incrLong(Map<String, Long> m, String k, long delta) {
+    putLong(m, k, getLong(m, k) + delta);
+  }
+
   private static void putLong(Map<String, Long> m, String k, long v) {
     if (v != 0) {
       m.put(k, v);
@@ -758,7 +838,26 @@ public class SchemaMetrics {
     return diff;
   }
 
+  /**
+   * Checks whether metric changes between the given old state and the current state are consistent.
+   * If they are not, throws an {@link AssertionError}.
+   */
   public static void validateMetricChanges(Map<String, Long> oldMetrics) {
+    if (!validateMetricChangesInternal(oldMetrics, true)) {
+      // This will output diagnostic info and throw an assertion error.
+      validateMetricChangesInternal(oldMetrics, false);
+    }
+  }
+
+  /**
+   * Validates metric changes between the given set of old metric values and the current values.
+   * @param oldMetrics old metric value map
+   * @param quiet if true, don't output anything and return whether validation is successful;
+   *              if false, output diagnostic info and throw an assertion if validation fails
+   * @return whether validation is successful
+   */
+  private static boolean validateMetricChangesInternal(Map<String, Long> oldMetrics,
+      boolean quiet) {
     final Map<String, Long> newMetrics = getMetricsSnapshot();
     final Map<String, Long> allCfDeltas = new TreeMap<String, Long>();
     final Map<String, Long> allBlockCategoryDeltas =
@@ -772,39 +871,47 @@ public class SchemaMetrics {
     for (SchemaMetrics cfm : tableAndFamilyToMetrics.values()) {
       for (String metricName : cfm.getAllMetricNames()) {
         if (metricName.startsWith(CF_PREFIX + CF_PREFIX)) {
-          throw new AssertionError("Column family prefix used twice: " +
-              metricName);
+          if (quiet) {
+            return false;
+          } else {
+            throw new AssertionError("Column family prefix used twice: " +
+                metricName);
+          }
         }
 
         final long oldValue = getLong(oldMetrics, metricName);
         final long newValue = getLong(newMetrics, metricName);
         final long delta = newValue - oldValue;
 
+        if (delta == 0) {
+          continue;
+        }
+
         // Re-calculate values of metrics with no column family (or CF/table)
         // specified based on all metrics with CF (or CF/table) specified.
-        if (delta != 0) {
-          if (cfm != ALL_SCHEMA_METRICS) {
-            final String aggregateMetricName =
-                cfTableMetricRE.matcher(metricName).replaceAll("");
-            if (!aggregateMetricName.equals(metricName)) {
+        if (cfm != ALL_SCHEMA_METRICS) {
+          final String aggregateMetricName =
+              cfTableMetricRE.matcher(metricName).replaceAll("");
+          if (!aggregateMetricName.equals(metricName)) {
+            if (!quiet) {
               LOG.debug("Counting " + delta + " units of " + metricName
                   + " towards " + aggregateMetricName);
-
-              putLong(allCfDeltas, aggregateMetricName,
-                  getLong(allCfDeltas, aggregateMetricName) + delta);
             }
-          } else {
-            LOG.debug("Metric=" + metricName + ", delta=" + delta);
+
+            incrLong(allCfDeltas, aggregateMetricName, delta);
           }
         }
 
         Matcher matcher = BLOCK_CATEGORY_RE.matcher(metricName);
         if (matcher.find()) {
-           // Only process per-block-category metrics
+          // Only process per-block-category metrics
           String metricNoBlockCategory = matcher.replaceAll("");
+          if (!quiet) {
+            LOG.debug("Counting " + delta + " units of " + metricName + " towards " +
+                metricNoBlockCategory);
+          }
 
-          putLong(allBlockCategoryDeltas, metricNoBlockCategory,
-              getLong(allBlockCategoryDeltas, metricNoBlockCategory) + delta);
+          incrLong(allBlockCategoryDeltas, metricNoBlockCategory, delta);
         }
       }
     }
@@ -814,8 +921,9 @@ public class SchemaMetrics {
       long actual = getLong(deltas, key);
       long expected = getLong(allCfDeltas, key);
       if (actual != expected) {
-        if (errors.length() > 0)
+        if (errors.length() > 0) {
           errors.append("\n");
+        }
         errors.append("The all-CF metric " + key + " changed by "
             + actual + " but the aggregation of per-CF/table metrics "
             + "yields " + expected);
@@ -835,8 +943,9 @@ public class SchemaMetrics {
       long actual = getLong(deltas, key);
       long expected = getLong(allBlockCategoryDeltas, key);
       if (actual != expected) {
-        if (errors.length() > 0)
+        if (errors.length() > 0) {
           errors.append("\n");
+        }
         errors.append("The all-block-category metric " + key
             + " changed by " + actual + " but the aggregation of "
             + "per-block-category metrics yields " + expected);
@@ -846,8 +955,13 @@ public class SchemaMetrics {
     checkNumBlocksInCache();
 
     if (errors.length() > 0) {
-      throw new AssertionError(errors.toString());
+      if (quiet) {
+        return false;
+      } else {
+        throw new AssertionError(errors.toString());
+      }
     }
+    return true;
   }
 
   /**

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Tue Oct 23 23:18:59 2012
@@ -65,6 +65,8 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ServerConnectionManager;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -106,6 +108,7 @@ import com.google.common.base.Preconditi
 public class HBaseTestingUtility {
   private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private final Configuration conf;
+  private final CacheConfig cacheConf;
   private MiniZooKeeperCluster zkCluster = null;
 
   /**
@@ -188,6 +191,7 @@ public class HBaseTestingUtility {
 
   public HBaseTestingUtility(Configuration conf) {
     this.conf = conf;
+    cacheConf = new CacheConfig(conf);
   }
 
   /**
@@ -1208,7 +1212,6 @@ public class HBaseTestingUtility {
    * Wait until <code>countOfRegion</code> in .META. have a non-empty
    * info:server.  This means all regions have been deployed, master has been
    * informed and updated .META. with the regions deployed server.
-   * @param conf Configuration
    * @param countOfRegions How many regions in .META.
    * @throws IOException
    */
@@ -1598,4 +1601,17 @@ REGION_LOOP:
       master.stop("killMiniHBaseCluster");
     }
   }
+
+  public void dropDefaultTable() throws Exception {
+    HBaseAdmin admin = new HBaseAdmin(getConfiguration());
+    if (admin.tableExists(HTestConst.DEFAULT_TABLE_BYTES)) {
+      admin.disableTable(HTestConst.DEFAULT_TABLE_BYTES);
+      admin.deleteTable(HTestConst.DEFAULT_TABLE_BYTES);
+    }
+    admin.close();
+  }
+
+  public BlockCache getBlockCache() {
+    return cacheConf.getBlockCache();
+  }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java Tue Oct 23 23:18:59 2012
@@ -338,10 +338,9 @@ public class TestCacheOnWrite {
       }
       region.flushcache();
     }
-    LruBlockCache blockCache =
-        (LruBlockCache) new CacheConfig(conf).getBlockCache();
+    BlockCache blockCache = TEST_UTIL.getBlockCache();
     blockCache.clearCache();
-    assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
+    assertEquals(0, ((LruBlockCache) blockCache).getBlockTypeCountsForTest().size());
     Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
     region.compactStores();
     LOG.debug("compactStores() returned");
@@ -350,7 +349,7 @@ public class TestCacheOnWrite {
         metricsBefore, SchemaMetrics.getMetricsSnapshot());
     LOG.debug(SchemaMetrics.formatMetrics(compactionMetrics));
     Map<BlockType, Integer> blockTypesInCache =
-        blockCache.getBlockTypeCountsForTest();
+        ((LruBlockCache) blockCache).getBlockTypeCountsForTest();
     LOG.debug("Block types in cache: " + blockTypesInCache);
     assertNull(blockTypesInCache.get(BlockType.DATA));
     region.close();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java Tue Oct 23 23:18:59 2012
@@ -189,6 +189,6 @@ public class EncodedSeekPerformanceTest 
   }
 
   private void clearBlockCache() {
-    ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
+    testingUtility.getBlockCache().clearCache();
   }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java Tue Oct 23 23:18:59 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -26,12 +27,18 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.StoreMetricType;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -59,23 +66,26 @@ public class TestRegionServerMetrics {
   private static final SchemaMetrics ALL_METRICS =
       SchemaMetrics.ALL_SCHEMA_METRICS;
 
-  private final HBaseTestingUtility TEST_UTIL =
+  private final HBaseTestingUtility testUtil =
       new HBaseTestingUtility();
+  private final Configuration conf = testUtil.getConfiguration();
 
   private Map<String, Long> startingMetrics;
 
   private final int META_AND_ROOT = 2;
 
+  private final int NUM_ROWS = 10000;
+
   @Before
   public void setUp() throws Exception {
     SchemaMetrics.setUseTableNameInTest(true);
     startingMetrics = SchemaMetrics.getMetricsSnapshot();
-    TEST_UTIL.startMiniCluster();
+    testUtil.startMiniCluster();
   }
 
   @After
   public void tearDown() throws IOException {
-    TEST_UTIL.shutdownMiniCluster();
+    testUtil.shutdownMiniCluster();
     SchemaMetrics.validateMetricChanges(startingMetrics);
   }
 
@@ -93,13 +103,11 @@ public class TestRegionServerMetrics {
   @Test
   public void testMultipleRegions() throws IOException, InterruptedException {
 
-    TEST_UTIL.createRandomTable(
-        TABLE_NAME,
-        Arrays.asList(FAMILIES),
-        MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
+    testUtil.createRandomTable(TABLE_NAME, Arrays.asList(FAMILIES), MAX_VERSIONS, NUM_COLS_PER_ROW,
+        NUM_FLUSHES, NUM_REGIONS, 1000);
 
     final HRegionServer rs =
-        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+        testUtil.getMiniHBaseCluster().getRegionServer(0);
 
     assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());
 
@@ -163,7 +171,7 @@ public class TestRegionServerMetrics {
     byte[] CF2 = Bytes.toBytes(cf2Name);
 
     long ts = 1234;
-    HTable hTable = TEST_UTIL.createTable(TABLE, new byte[][]{CF1, CF2});
+    HTable hTable = testUtil.createTable(TABLE, new byte[][]{CF1, CF2});
 
     Put p = new Put(ROW);
     p.add(CF1, CF1, ts, CF1);
@@ -198,7 +206,7 @@ public class TestRegionServerMetrics {
         new int[] {kvLength, kvLength, kvLength, kvLength});
 
     // getsize/nextsize should not be set on flush or compaction
-    for (HRegion hr : TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE)) {
+    for (HRegion hr : testUtil.getMiniHBaseCluster().getRegions(TABLE)) {
       hr.flushcache();
       hr.compactStores();
     }
@@ -208,12 +216,10 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testNumReadsAndWrites() throws IOException, InterruptedException{
-    TEST_UTIL.createRandomTable(
-        "NumReadsWritesTest",
-        Arrays.asList(FAMILIES),
-        MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
+    testUtil.createRandomTable("NumReadsWritesTest", Arrays.asList(FAMILIES), MAX_VERSIONS,
+        NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
     final HRegionServer rs =
-        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+        testUtil.getMiniHBaseCluster().getRegionServer(0);
 
     // This may not be necessary since we verify the number of reads and writes from atomic
     // variables and not from collected metrics.
@@ -226,4 +232,62 @@ public class TestRegionServerMetrics {
     Assert.assertEquals(rs.getOnlineRegions().size(), rs.getNumReads().get());
     Assert.assertEquals(0, rs.getNumWrites().get());
   }
+
+  @Test
+  public void testEncodingInCache() throws Exception {
+    HTable t = null;
+    try {
+      HColumnDescriptor hcd = new HColumnDescriptor(HTestConst.DEFAULT_CF_BYTES)
+          .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
+      testUtil.createTable(HTestConst.DEFAULT_TABLE_BYTES, new HColumnDescriptor[]{hcd});
+      t = new HTable(conf, HTestConst.DEFAULT_TABLE_BYTES);
+
+      // Write some test data
+      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
+        String rowStr = "row" + iRow;
+        byte[] rowBytes = Bytes.toBytes(rowStr);
+        Put p = new Put(rowBytes);
+        for (int iQual = 0; iQual < NUM_COLS_PER_ROW; ++iQual) {
+          String qualStr = "q" + iQual;
+          byte[] qualBytes = Bytes.toBytes(qualStr);
+          String valueStr = "v" + Integer.toString(iRow + iQual);
+          byte[] valueBytes = Bytes.toBytes(valueStr);
+          p.add(HTestConst.DEFAULT_CF_BYTES, qualBytes, valueBytes);
+        }
+        t.put(p);
+      }
+      HBaseAdmin adm = new HBaseAdmin(conf);
+      adm.flush(HTestConst.DEFAULT_TABLE_STR);
+      adm.close();
+
+      LOG.info("Clearing cache and reading");
+      testUtil.getBlockCache().clearCache();
+      startingMetrics = SchemaMetrics.getMetricsSnapshot();
+      // Read all data to bring it into cache.
+      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
+        Get g = new Get(Bytes.toBytes("row" + iRow));
+        g.addFamily(HTestConst.DEFAULT_CF_BYTES);
+        t.get(g);
+      }
+
+      // Check metrics
+      Map<String, Long> m = SchemaMetrics.diffMetrics(this.startingMetrics,
+          SchemaMetrics.getMetricsSnapshot());
+      LOG.info("Metrics after reading:\n" + SchemaMetrics.formatMetrics(m));
+      long dataBlockEncodedSize = SchemaMetrics.getLong(m,
+          SchemaMetrics.ALL_SCHEMA_METRICS.getBlockMetricName(
+          BlockType.BlockCategory.DATA, false, SchemaMetrics.BlockMetricType.CACHE_SIZE));
+      long dataBlockUnencodedSize = SchemaMetrics.getLong(m,
+          SchemaMetrics.ALL_SCHEMA_METRICS.getBlockMetricName(BlockType.BlockCategory.DATA, false,
+              SchemaMetrics.BlockMetricType.UNENCODED_CACHE_SIZE));
+      LOG.info("Data block encoded size in cache: " + dataBlockEncodedSize);
+      LOG.info("Data block unencoded size in cache: " + dataBlockUnencodedSize);
+      assertTrue(dataBlockEncodedSize * 2 < dataBlockUnencodedSize);
+    } finally {
+      if (t != null) {
+        t.close();
+      }
+      testUtil.dropDefaultTable();
+    }
+  }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java?rev=1401513&r1=1401512&r2=1401513&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java Tue Oct 23 23:18:59 2012
@@ -31,7 +31,10 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;
@@ -46,10 +49,15 @@ import org.junit.runners.Parameterized.P
 @RunWith(Parameterized.class)
 public class TestSchemaMetrics {
 
+  private static final Log LOG = LogFactory.getLog(TestSchemaMetrics.class);
+
   private final String TABLE_NAME = "myTable";
   private final String CF_NAME = "myColumnFamily";
 
   private final boolean useTableName;
+  private final String metricPrefix;
+  private final SchemaMetrics schemaMetrics;
+
   private Map<String, Long> startingMetrics;
 
   @Parameters
@@ -60,6 +68,10 @@ public class TestSchemaMetrics {
   public TestSchemaMetrics(boolean useTableName) {
     this.useTableName = useTableName;
     SchemaMetrics.setUseTableNameInTest(useTableName);
+    metricPrefix = (useTableName ? SchemaMetrics.TABLE_PREFIX +
+        TABLE_NAME + "." : "") + SchemaMetrics.CF_PREFIX + CF_NAME + ".";
+    schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME,
+        CF_NAME);
   }
 
   @Before
@@ -68,11 +80,45 @@ public class TestSchemaMetrics {
   };
 
   @Test
+  public void testPersistentMetric() {
+    for (BlockCategory blockCat : BlockCategory.values()) {
+      for (boolean isCompaction : HConstants.BOOLEAN_VALUES) {
+        for (BlockMetricType bmt : BlockMetricType.values()) {
+          if (!bmt.compactionAware() && isCompaction) {
+            continue;
+          }
+          String metricKey = schemaMetrics.getBlockMetricName(blockCat, isCompaction, bmt);
+          assertEquals("Incorrectly identified whether metric key is persistent: " + metricKey,
+              bmt.persistent(), SchemaMetrics.isPersistentMetricKey(metricKey));
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testRemoveBlockCategoryFromMetricKey() {
+    for (BlockCategory blockCat : BlockCategory.values()) {
+      if (blockCat == BlockCategory.ALL_CATEGORIES) {
+        continue;
+      }
+      for (boolean isCompaction : HConstants.BOOLEAN_VALUES) {
+        for (BlockMetricType bmt : BlockMetricType.values()) {
+          if (!bmt.compactionAware() && isCompaction) {
+            continue;
+          }
+          String metricKey = schemaMetrics.getBlockMetricName(blockCat, isCompaction, bmt);
+          String metricKeyNoBlockCat =
+              schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, isCompaction, bmt);
+          assertEquals(metricKeyNoBlockCat,
+              SchemaMetrics.BLOCK_CATEGORY_RE.matcher(metricKey).replaceAll(""));
+        }
+      }
+    }
+
+  }
+
+  @Test
   public void testNaming() {
-    final String metricPrefix = (useTableName ? SchemaMetrics.TABLE_PREFIX +
-        TABLE_NAME + "." : "") + SchemaMetrics.CF_PREFIX + CF_NAME + ".";
-    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME,
-        CF_NAME);
     SchemaMetrics ALL_CF_METRICS = SchemaMetrics.ALL_SCHEMA_METRICS;
 
     // fsReadTimeMetric
@@ -182,12 +228,17 @@ public class TestSchemaMetrics {
     SchemaMetrics.validateMetricChanges(startingMetrics);
   }
 
+  private static final int NUM_TABLES = 3;
+  private static final int NUM_FAMILIES = 3;
+  private static final int BLOCK_SIZE_RANGE = 1024 * 1024;
+  private static final int READ_TIME_MS_RANGE = 1000;
+
   @Test
   public void testIncrements() {
     Random rand = new Random(23982737L);
-    for (int i = 1; i <= 3; ++i) {
+    for (int i = 1; i <= NUM_TABLES; ++i) {
       final String tableName = "table" + i;
-      for (int j = 1; j <= 3; ++j) {
+      for (int j = 1; j <= NUM_FAMILIES; ++j) {
         final String cfName = "cf" + j;
         SchemaMetrics sm = SchemaMetrics.getInstance(tableName, cfName);
         for (boolean isInBloom : BOOL_VALUES) {
@@ -203,13 +254,13 @@ public class TestSchemaMetrics {
           for (boolean isCompaction : BOOL_VALUES) {
             sm.updateOnCacheHit(blockCat, isCompaction);
             checkMetrics();
-            sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt());
+            sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt(READ_TIME_MS_RANGE));
             checkMetrics();
           }
 
           for (boolean isEviction : BOOL_VALUES) {
-            sm.updateOnCachePutOrEvict(blockCat, (isEviction ? -1 : 1)
-                * rand.nextInt(1024 * 1024));
+            int encodedDelta = (isEviction ? -1 : 1) * (rand.nextInt(BLOCK_SIZE_RANGE) + 1);
+            sm.updateOnCachePutOrEvict(blockCat, encodedDelta, 2 * encodedDelta);
           }
         }
       }



Mime
View raw message