hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nspiegelb...@apache.org
Subject svn commit: r1201994 [2/2] - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/regionserver/metrics/ test/java/org/apache/hadoop/hbase/ test/java/org/apach...
Date Tue, 15 Nov 2011 01:26:06 GMT
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java?rev=1201994&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java Tue Nov 15 01:26:05 2011
@@ -0,0 +1,759 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang.mutable.MutableDouble;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * A collection of metric names in a given column family or a (table, column
+ * family) combination. The following "dimensions" are supported:
+ * <ul>
+ * <li>Table name (optional; enabled based on configuration)</li>
+ * <li>Per-column family vs. aggregated. The aggregated mode is only supported
+ * when table name is not included.</li>
+ * <li>Block category (data, index, bloom filter, etc.)</li>
+ * <li>Whether the request is part of a compaction</li>
+ * <li>Metric type (read time, block read count, cache hits/misses, etc.)</li>
+ * </ul>
+ * <p>
+ * An instance of this class does not store any metric values. It only
+ * determines the correct metric name for each combination of the above
+ * dimensions.
+ * <p>
+ * <table>
+ * <tr>
+ * <th rowspan="2">Metric key</th>
+ * <th colspan="2">Per-table metrics conf setting</th>
+ * <th rowspan="2">Description</th>
+ * </tr>
+ * <tr>
+ * <th>On</th>
+ * <th>Off</th>
+ * </tr>
+ * <tr>
+ *   <td> tbl.T.cf.CF.M </td> <td> Include </td> <td> Skip    </td>
+ *   <td> A specific column family of a specific table        </td>
+ * </tr>
+ * <tr>
+ *   <td> tbl.T.M       </td> <td> Skip    </td> <td> Skip    </td>
+ *   <td> All column families in the given table              </td>
+ * </tr>
+ * <tr>
+ *   <td> cf.CF.M       </td> <td> Skip    </td> <td> Include </td>
+ *   <td> A specific column family in all tables              </td>
+ * </tr>
+ * <tr>
+ *   <td> M             </td> <td> Include </td> <td> Include </td>
+ *   <td> All column families in all tables                   </td>
+ * </tr>
+ * </table>
+ */
+public class SchemaMetrics {
+
+  public interface SchemaAware {
+    public String getTableName();
+    public String getColumnFamilyName();
+    public SchemaMetrics getSchemaMetrics();
+  }
+
+  private static final Log LOG = LogFactory.getLog(SchemaMetrics.class);
+
+  public static enum BlockMetricType {
+    // Metric configuration: compactionAware, timeVarying
+    READ_TIME("Read",                   true, true),
+    READ_COUNT("BlockReadCnt",          true, false),
+    CACHE_HIT("BlockReadCacheHitCnt",   true, false),
+    CACHE_MISS("BlockReadCacheMissCnt", true, false),
+
+    CACHE_SIZE("blockCacheSize",        false, false),
+    CACHED("blockCacheNumCached",       false, false),
+    EVICTED("blockCacheNumEvicted",     false, false);
+
+    private final String metricStr;
+    private final boolean compactionAware;
+    private final boolean timeVarying;
+
+    BlockMetricType(String metricStr, boolean compactionAware,
+          boolean timeVarying) {
+      this.metricStr = metricStr;
+      this.compactionAware = compactionAware;
+      this.timeVarying = timeVarying;
+    }
+
+    @Override
+    public String toString() {
+      return metricStr;
+    }
+
+    private static final String BLOCK_METRIC_TYPE_RE;
+    static {
+      StringBuilder sb = new StringBuilder();
+      for (BlockMetricType bmt : values()) {
+        if (sb.length() > 0)
+          sb.append("|");
+        sb.append(bmt);
+      }
+      BLOCK_METRIC_TYPE_RE = sb.toString();
+    }
+  };
+
+  public static enum StoreMetricType {
+    STORE_FILE_COUNT("storeFileCount"),
+    STORE_FILE_INDEX_SIZE("storeFileIndexSizeMB"),
+    STORE_FILE_SIZE_MB("storeFileSizeMB"),
+    STATIC_BLOOM_SIZE_KB("staticBloomSizeKB"),
+    MEMSTORE_SIZE_MB("memstoreSizeMB"),
+    STATIC_INDEX_SIZE_KB("staticIndexSizeKB"),
+    FLUSH_SIZE("flushSize");
+
+    private final String metricStr;
+
+    StoreMetricType(String metricStr) {
+      this.metricStr = metricStr;
+    }
+
+    @Override
+    public String toString() {
+      return metricStr;
+    }
+  };
+
+  // Constants
+  /**
+   * A string used when column family or table name is unknown, and in some
+   * unit tests. This should not normally show up in metric names but if it
+   * does it is better than creating a silent discrepancy in total vs.
+   * per-CF/table metrics.
+   */
+  public static final String UNKNOWN = "__unknown";
+
+  private static final String TABLE_PREFIX = "tbl.";
+  public static final String CF_PREFIX = "cf.";
+  public static final String BLOCK_TYPE_PREFIX = "bt.";
+
+  /**
+   * A special schema metric value that means "all tables aggregated" or
+   * "all column families aggregated" when used as a table name or a column
+   * family name.
+   */
+  public static final String TOTAL_KEY = "";
+
+  /**
+   * Special handling for meta-block-specific metrics for
+   * backwards-compatibility.
+   */
+  private static final String META_BLOCK_CATEGORY_STR = "Meta";
+
+  private static final int NUM_BLOCK_CATEGORIES =
+      BlockCategory.values().length;
+
+  private static final int NUM_METRIC_TYPES =
+      BlockMetricType.values().length;
+
+  static final boolean[] BOOL_VALUES = new boolean[] { false, true };
+
+  private static final int NUM_BLOCK_METRICS =
+      NUM_BLOCK_CATEGORIES *  // blockCategory
+      BOOL_VALUES.length *    // isCompaction
+      NUM_METRIC_TYPES;       // metricType
+
+  private static final int NUM_STORE_METRIC_TYPES =
+      StoreMetricType.values().length;
+
+  /** Conf key controlling whether we include table name in metric names */
+  private static final String SHOW_TABLE_NAME_CONF_KEY =
+      "hbase.metrics.showTableName";
+
+  // Global variables
+  /** All instances of this class */
+  private static final ConcurrentHashMap<String, SchemaMetrics>
+      cfToMetrics = new ConcurrentHashMap<String, SchemaMetrics>();
+
+  /** Metrics for all tables and column families. */
+  // This has to be initialized after cfToMetrics.
+  public static final SchemaMetrics ALL_SCHEMA_METRICS =
+    getInstance(TOTAL_KEY, TOTAL_KEY);
+
+  /**
+   * Whether to include table name in metric names. If this is null, it has not
+   * been initialized. This is a global instance, but we also have a copy of it
+   * per a {@link SchemaMetrics} object to avoid synchronization overhead.
+   */
+  private static volatile Boolean useTableNameGlobally;
+
+  /** Whether we logged a message about configuration inconsistency */
+  private static volatile boolean loggedConfInconsistency;
+
+  // Instance variables
+  private final String[] blockMetricNames = new String[NUM_BLOCK_METRICS];
+  private final boolean[] blockMetricTimeVarying =
+      new boolean[NUM_BLOCK_METRICS];
+
+  private final String[] bloomMetricNames = new String[2];
+  private final String[] storeMetricNames = new String[NUM_STORE_METRIC_TYPES];
+
+  private SchemaMetrics(final String tableName, final String cfName) {
+    String metricPrefix =
+        tableName.equals(TOTAL_KEY) ? "" : TABLE_PREFIX + tableName + ".";
+    metricPrefix += cfName.equals(TOTAL_KEY) ? "" : CF_PREFIX + cfName + ".";
+
+    for (BlockCategory blockCategory : BlockCategory.values()) {
+      for (boolean isCompaction : BOOL_VALUES) {
+        for (BlockMetricType metricType : BlockMetricType.values()) {
+          if (!metricType.compactionAware && isCompaction) {
+            continue;
+          }
+
+          StringBuilder sb = new StringBuilder(metricPrefix);
+          if (blockCategory != BlockCategory.ALL_CATEGORIES
+              && blockCategory != BlockCategory.META) {
+            String categoryStr = blockCategory.toString();
+            categoryStr = categoryStr.charAt(0)
+                + categoryStr.substring(1).toLowerCase();
+            sb.append(BLOCK_TYPE_PREFIX + categoryStr + ".");
+          }
+
+          if (metricType.compactionAware) {
+            sb.append(isCompaction ? "compaction" : "fs");
+          }
+
+          // A special-case for meta blocks for backwards-compatibility.
+          if (blockCategory == BlockCategory.META) {
+            sb.append(META_BLOCK_CATEGORY_STR);
+          }
+
+          sb.append(metricType);
+
+          int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
+          blockMetricNames[i] = sb.toString().intern();
+          blockMetricTimeVarying[i] = metricType.timeVarying;
+        }
+      }
+    }
+
+    for (boolean isInBloom : BOOL_VALUES) {
+      bloomMetricNames[isInBloom ? 1 : 0] = metricPrefix
+          + (isInBloom ? "keyMaybeInBloomCnt" : "keyNotInBloomCnt");
+    }
+
+    for (StoreMetricType storeMetric : StoreMetricType.values()) {
+      storeMetricNames[storeMetric.ordinal()] = metricPrefix +
+        storeMetric.toString();
+    }
+  }
+
+  /**
+   * Returns a {@link SchemaMetrics} object for the given table and column
+   * family, instantiating it if necessary.
+   *
+   * @param tableName table name (null is interpreted as "unknown"). This is
+   *          replaced with the "all tables" key when including table names
+   *          in metric names is disabled in configuration.
+   * @param cfName column family name (null is interpreted as "unknown")
+   */
+  public static SchemaMetrics getInstance(String tableName, String cfName) {
+    if (tableName == null) {
+      tableName = UNKNOWN;
+    }
+
+    if (!tableName.equals(TOTAL_KEY)) {
+      // We are provided with a non-trivial table name (including "unknown").
+      // We need to know whether table name should be included into metrics.
+      if (useTableNameGlobally == null) {
+        throw new IllegalStateException("The value of the "
+            + SHOW_TABLE_NAME_CONF_KEY + " conf option has not been specified "
+            + "in SchemaMetrics");
+      }
+      final boolean useTableName = useTableNameGlobally;
+      if (!useTableName) {
+        // Don't include table name in metric keys.
+        tableName = TOTAL_KEY;
+      }
+    }
+
+    if (cfName == null) {
+      cfName = UNKNOWN;
+    }
+
+    final String instanceKey = tableName + "\t" + cfName;
+    SchemaMetrics schemaMetrics = cfToMetrics.get(instanceKey);
+    if (schemaMetrics != null) {
+      return schemaMetrics;
+    }
+
+    schemaMetrics = new SchemaMetrics(tableName, cfName);
+    SchemaMetrics existingMetrics = cfToMetrics.putIfAbsent(instanceKey,
+        schemaMetrics);
+    return existingMetrics != null ? existingMetrics : schemaMetrics;
+  }
+
+  private static final int getBlockMetricIndex(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    int i = 0;
+    i = i * NUM_BLOCK_CATEGORIES + blockCategory.ordinal();
+    i = i * BOOL_VALUES.length + (isCompaction ? 1 : 0);
+    i = i * NUM_METRIC_TYPES + metricType.ordinal();
+    return i;
+  }
+
+  public String getBlockMetricName(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    if (isCompaction && !metricType.compactionAware) {
+      throw new IllegalArgumentException("isCompaction cannot be true for "
+          + metricType);
+    }
+    return blockMetricNames[getBlockMetricIndex(blockCategory, isCompaction,
+        metricType)];
+  }
+
+  public String getBloomMetricName(boolean isInBloom) {
+    return bloomMetricNames[isInBloom ? 1 : 0];
+  }
+
+  /**
+   * Increments the given metric, both per-CF and aggregate, for both the given
+   * category and all categories in aggregate (four counters total).
+   */
+  private void incrNumericMetric(BlockCategory blockCategory,
+      boolean isCompaction, BlockMetricType metricType) {
+    if (blockCategory == null) {
+      blockCategory = BlockCategory.UNKNOWN;  // So that we see this in stats.
+    }
+    HRegion.incrNumericMetric(getBlockMetricName(blockCategory,
+        isCompaction, metricType), 1);
+
+    if (blockCategory != BlockCategory.ALL_CATEGORIES) {
+      incrNumericMetric(BlockCategory.ALL_CATEGORIES, isCompaction,
+          metricType);
+    }
+  }
+
+  private void addToReadTime(BlockCategory blockCategory,
+      boolean isCompaction, long timeMs) {
+    HRegion.incrTimeVaryingMetric(getBlockMetricName(blockCategory,
+        isCompaction, BlockMetricType.READ_TIME), timeMs);
+
+    // Also update the read time aggregated across all block categories
+    if (blockCategory != BlockCategory.ALL_CATEGORIES) {
+      addToReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs);
+    }
+  }
+
+  /**
+   * Used to accumulate store metrics across multiple regions in a region
+   * server.  These metrics are not "persistent", i.e. we keep overriding them
+   * on every update instead of incrementing, so we need to accumulate them in
+   * a temporary map before pushing them to the global metric collection.
+   * @param tmpMap a temporary map for accumulating store metrics
+   * @param storeMetricType the store metric type to increment
+   * @param val the value to add to the metric
+   */
+  public void accumulateStoreMetric(final Map<String, MutableDouble> tmpMap,
+      StoreMetricType storeMetricType, double val) {
+    final String key = getStoreMetricName(storeMetricType);
+    if (tmpMap.get(key) != null) {
+      tmpMap.get(key).add(val);
+    } else {
+      tmpMap.put(key, new MutableDouble(val));
+    }
+
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.accumulateStoreMetric(tmpMap, storeMetricType, val);
+    }
+  }
+
+  public String getStoreMetricName(StoreMetricType storeMetricType) {
+    return storeMetricNames[storeMetricType.ordinal()];
+  }
+
+  /**
+   * Update a metric that does not get reset on every poll.
+   * @param storeMetricType the store metric to update
+   * @param value the value to update the metric to
+   */
+  public void updatePersistentStoreMetric(StoreMetricType storeMetricType,
+      long value) {
+    HRegion.incrNumericPersistentMetric(
+        storeMetricNames[storeMetricType.ordinal()], value);
+  }
+
+  /**
+   * Updates the number of hits and the total number of block reads on a block
+   * cache hit.
+   */
+  public void updateOnCacheHit(BlockCategory blockCategory,
+      boolean isCompaction) {
+    blockCategory.expectSpecific();
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_HIT);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT);
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.updateOnCacheHit(blockCategory, isCompaction);
+    }
+  }
+
+  /**
+   * Updates read time, the number of misses, and the total number of block
+   * reads on a block cache miss.
+   */
+  public void updateOnCacheMiss(BlockCategory blockCategory,
+      boolean isCompaction, long timeMs) {
+    blockCategory.expectSpecific();
+    addToReadTime(blockCategory, isCompaction, timeMs);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_MISS);
+    incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT);
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.updateOnCacheMiss(blockCategory, isCompaction,
+          timeMs);
+    }
+  }
+
+  /**
+   * Adds the given delta to the cache size for the given block category and
+   * the aggregate metric for all block categories. Updates both the per-CF
+   * counter and the counter for all CFs (four metrics total). The cache size
+   * metric is "persistent", i.e. it does not get reset when metrics are
+   * collected.
+   */
+  public void addToCacheSize(BlockCategory category, long cacheSizeDelta) {
+    if (category == null) {
+      category = BlockCategory.ALL_CATEGORIES;
+    }
+    HRegion.incrNumericPersistentMetric(getBlockMetricName(category, false,
+        BlockMetricType.CACHE_SIZE), cacheSizeDelta);
+
+    if (category != BlockCategory.ALL_CATEGORIES) {
+      addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta);
+    }
+  }
+
+  public void updateOnCachePutOrEvict(BlockCategory blockCategory,
+      long cacheSizeDelta, boolean isEviction) {
+    addToCacheSize(blockCategory, cacheSizeDelta);
+    incrNumericMetric(blockCategory, false,
+        isEviction ? BlockMetricType.EVICTED : BlockMetricType.CACHED);
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.updateOnCachePutOrEvict(blockCategory, cacheSizeDelta,
+          isEviction);
+    }
+  }
+
+  /**
+   * Increments both the per-CF and the aggregate counter of bloom
+   * positives/negatives as specified by the argument.
+   */
+  public void updateBloomMetrics(boolean isInBloom) {
+    HRegion.incrNumericMetric(getBloomMetricName(isInBloom), 1);
+    if (this != ALL_SCHEMA_METRICS) {
+      ALL_SCHEMA_METRICS.updateBloomMetrics(isInBloom);
+    }
+  }
+
+  /**
+   * Sets the flag whether to use table name in metric names according to the
+   * given configuration. This must be called at least once before
+   * instantiating HFile readers/writers.
+   */
+  public static void configureGlobally(Configuration conf) {
+    final boolean useTableNameNew =
+        conf.getBoolean(SHOW_TABLE_NAME_CONF_KEY, false);
+    setUseTableName(useTableNameNew);
+  }
+
+  /**
+   * Sets the flag of whether to use table name in metric names. This flag
+   * is specified in configuration and is not expected to change at runtime,
+   * so we log an error message when it does change.
+   */
+  private static void setUseTableName(final boolean useTableNameNew) {
+    if (useTableNameGlobally == null) {
+      // This configuration option has not yet been set.
+      useTableNameGlobally = useTableNameNew;
+    } else if (useTableNameGlobally != useTableNameNew
+        && !loggedConfInconsistency) {
+      // The configuration is inconsistent and we have not reported it
+      // previously. Once we report it, just keep ignoring the new setting.
+      LOG.error("Inconsistent configuration. Previous configuration "
+          + "for using table name in metrics: " + useTableNameGlobally + ", "
+          + "new configuration: " + useTableNameNew);
+      loggedConfInconsistency = true;
+    }
+  }
+
+  // Methods used in testing
+
+  private static final String regexEscape(String s) {
+    return s.replace(".", "\\.");
+  }
+
+  /**
+   * Assume that table names used in tests don't contain dots, except for the
+   * META table.
+   */
+  private static final String WORD_AND_DOT_RE_STR = "([^.]+|" +
+      regexEscape(Bytes.toString(HConstants.META_TABLE_NAME)) +
+      ")\\.";
+
+  /** "tbl.<table_name>." */
+  private static final String TABLE_NAME_RE_STR =
+      "\\b" + regexEscape(TABLE_PREFIX) + WORD_AND_DOT_RE_STR;
+
+  /** "cf.<cf_name>." */
+  private static final String CF_NAME_RE_STR =
+      "\\b" + regexEscape(CF_PREFIX) + WORD_AND_DOT_RE_STR;
+  private static final Pattern CF_NAME_RE = Pattern.compile(CF_NAME_RE_STR);
+
+  /** "tbl.<table_name>.cf.<cf_name>." */
+  private static final Pattern TABLE_AND_CF_NAME_RE = Pattern.compile(
+      TABLE_NAME_RE_STR + CF_NAME_RE_STR);
+
+  private static final Pattern BLOCK_CATEGORY_RE = Pattern.compile(
+      "\\b" + regexEscape(BLOCK_TYPE_PREFIX) + "[^.]+\\." +
+      // Also remove the special-case block type marker for meta blocks
+      "|" + META_BLOCK_CATEGORY_STR + "(?=" +
+      BlockMetricType.BLOCK_METRIC_TYPE_RE + ")");
+
+  /**
+   * A suffix for the "number of operations" part of "time-varying metrics". We
+   * only use this for metric verification in unit testing. Time-varying
+   * metrics are handled by a different code path in production.
+   */
+  private static String NUM_OPS_SUFFIX = "numops";
+
+  /**
+   * A custom suffix that we use for verifying the second component of
+   * a "time-varying metric".
+   */
+  private static String TOTAL_SUFFIX = "_total";
+  private static final Pattern TIME_VARYING_SUFFIX_RE = Pattern.compile(
+      "(" + NUM_OPS_SUFFIX + "|" + TOTAL_SUFFIX + ")$");
+
+  void printMetricNames() {
+    for (BlockCategory blockCategory : BlockCategory.values()) {
+      for (boolean isCompaction : BOOL_VALUES) {
+        for (BlockMetricType metricType : BlockMetricType.values()) {
+          int i = getBlockMetricIndex(blockCategory, isCompaction, metricType);
+          LOG.debug("blockCategory=" + blockCategory + ", "
+              + "metricType=" + metricType + ", isCompaction=" + isCompaction
+              + ", metricName=" + blockMetricNames[i]);
+        }
+      }
+    }
+  }
+
+  private Collection<String> getAllMetricNames() {
+    List<String> allMetricNames = new ArrayList<String>();
+    for (int i = 0; i < blockMetricNames.length; ++i) {
+      final String blockMetricName = blockMetricNames[i];
+      final boolean timeVarying = blockMetricTimeVarying[i];
+      if (blockMetricName != null) {
+        if (timeVarying) {
+          allMetricNames.add(blockMetricName + NUM_OPS_SUFFIX);
+          allMetricNames.add(blockMetricName + TOTAL_SUFFIX);
+        } else {
+          allMetricNames.add(blockMetricName);
+        }
+      }
+    }
+    allMetricNames.addAll(Arrays.asList(bloomMetricNames));
+    return allMetricNames;
+  }
+
+  private static final boolean isTimeVaryingKey(String metricKey) {
+    return metricKey.endsWith(NUM_OPS_SUFFIX)
+        || metricKey.endsWith(TOTAL_SUFFIX);
+  }
+
+  private static final String stripTimeVaryingSuffix(String metricKey) {
+    return TIME_VARYING_SUFFIX_RE.matcher(metricKey).replaceAll("");
+  }
+
+  public static Map<String, Long> getMetricsSnapshot() {
+    Map<String, Long> metricsSnapshot = new TreeMap<String, Long>();
+    for (SchemaMetrics cfm : cfToMetrics.values()) {
+      for (String metricName : cfm.getAllMetricNames()) {
+        long metricValue;
+        if (isTimeVaryingKey(metricName)) {
+          Pair<Long, Integer> totalAndCount =
+              HRegion.getTimeVaryingMetric(stripTimeVaryingSuffix(metricName));
+          metricValue = metricName.endsWith(TOTAL_SUFFIX) ?
+              totalAndCount.getFirst() : totalAndCount.getSecond();
+        } else {
+          metricValue = HRegion.getNumericMetric(metricName);
+        }
+
+        metricsSnapshot.put(metricName, metricValue);
+      }
+    }
+    return metricsSnapshot;
+  }
+
+  private static long getLong(Map<String, Long> m, String k) {
+    Long l = m.get(k);
+    return l != null ? l : 0;
+  }
+
+  private static void putLong(Map<String, Long> m, String k, long v) {
+    if (v != 0) {
+      m.put(k, v);
+    } else {
+      m.remove(k);
+    }
+  }
+  private static Map<String, Long> diffMetrics(Map<String, Long> a,
+      Map<String, Long> b) {
+    Set<String> allKeys = new TreeSet<String>(a.keySet());
+    allKeys.addAll(b.keySet());
+    Map<String, Long> diff = new TreeMap<String, Long>();
+    for (String k : allKeys) {
+      long aVal = getLong(a, k);
+      long bVal = getLong(b, k);
+      if (aVal != bVal) {
+        diff.put(k, bVal - aVal);
+      }
+    }
+    return diff;
+  }
+
+  public static void validateMetricChanges(Map<String, Long> oldMetrics) {
+    final Map<String, Long> newMetrics = getMetricsSnapshot();
+    final Map<String, Long> allCfDeltas = new TreeMap<String, Long>();
+    final Map<String, Long> allBlockCategoryDeltas =
+        new TreeMap<String, Long>();
+    final Map<String, Long> deltas = diffMetrics(oldMetrics, newMetrics);
+    final Pattern cfTableMetricRE =
+        useTableNameGlobally ? TABLE_AND_CF_NAME_RE : CF_NAME_RE;
+    final Set<String> allKeys = new TreeSet<String>(oldMetrics.keySet());
+    allKeys.addAll(newMetrics.keySet());
+
+    for (SchemaMetrics cfm : cfToMetrics.values()) {
+      for (String metricName : cfm.getAllMetricNames()) {
+        if (metricName.startsWith(CF_PREFIX + CF_PREFIX)) {
+          throw new AssertionError("Column family prefix used twice: " +
+              metricName);
+        }
+
+        final long oldValue = getLong(oldMetrics, metricName);
+        final long newValue = getLong(newMetrics, metricName);
+        final long delta = newValue - oldValue;
+
+        // Re-calculate values of metrics with no column family (or CF/table)
+        // specified based on all metrics with CF (or CF/table) specified.
+        if (delta != 0) {
+          if (cfm != ALL_SCHEMA_METRICS) {
+            final String aggregateMetricName =
+                cfTableMetricRE.matcher(metricName).replaceAll("");
+            if (!aggregateMetricName.equals(metricName)) {
+              LOG.debug("Counting " + delta + " units of " + metricName
+                  + " towards " + aggregateMetricName);
+
+              putLong(allCfDeltas, aggregateMetricName,
+                  getLong(allCfDeltas, aggregateMetricName) + delta);
+            }
+          } else {
+            LOG.debug("Metric=" + metricName + ", delta=" + delta);
+          }
+        }
+
+        Matcher matcher = BLOCK_CATEGORY_RE.matcher(metricName);
+        if (matcher.find()) {
+           // Only process per-block-category metrics
+          String metricNoBlockCategory = matcher.replaceAll("");
+
+          putLong(allBlockCategoryDeltas, metricNoBlockCategory,
+              getLong(allBlockCategoryDeltas, metricNoBlockCategory) + delta);
+        }
+      }
+    }
+
+    StringBuilder errors = new StringBuilder();
+    for (String key : ALL_SCHEMA_METRICS.getAllMetricNames()) {
+      long actual = getLong(deltas, key);
+      long expected = getLong(allCfDeltas, key);
+      if (actual != expected) {
+        if (errors.length() > 0)
+          errors.append("\n");
+        errors.append("The all-CF metric " + key + " changed by "
+            + actual + " but the aggregation of per-CF/table metrics "
+            + "yields " + expected);
+      }
+    }
+
+    // Verify metrics computed for all block types based on the aggregation
+    // of per-block-type metrics.
+    for (String key : allKeys) {
+      if (BLOCK_CATEGORY_RE.matcher(key).find() ||
+          key.contains(ALL_SCHEMA_METRICS.getBloomMetricName(false)) ||
+          key.contains(ALL_SCHEMA_METRICS.getBloomMetricName(true))){
+        // Skip per-block-category metrics. Also skip bloom filters, because
+        // they are not aggregated per block type.
+        continue;
+      }
+      long actual = getLong(deltas, key);
+      long expected = getLong(allBlockCategoryDeltas, key);
+      if (actual != expected) {
+        if (errors.length() > 0)
+          errors.append("\n");
+        errors.append("The all-block-category metric " + key
+            + " changed by " + actual + " but the aggregation of "
+            + "per-block-category metrics yields " + expected);
+      }
+    }
+
+    if (errors.length() > 0) {
+      throw new AssertionError(errors.toString());
+    }
+  }
+
+  /**
+   * Creates an instance pretending both the table and column family are
+   * unknown. Used in unit tests.
+   */
+  public static SchemaMetrics getUnknownInstanceForTest() {
+    return getInstance(UNKNOWN, UNKNOWN);
+  }
+
+  /**
+   * Set the flag to use or not use table name in metric names. Used in unit
+   * tests, so the flag can be set arbitrarily.
+   */
+  public static void setUseTableNameInTest(final boolean useTableNameNew) {
+    useTableNameGlobally = useTableNameNew;
+  }
+
+}

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Tue Nov 15 01:26:05 2011
@@ -33,6 +33,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Random;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -71,10 +72,11 @@ import org.apache.hadoop.hbase.zookeeper
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
+import org.apache.zookeeper.ZooKeeper;
 
 /**
  * Facility for testing HBase. Replacement for
@@ -134,6 +136,13 @@ public class HBaseTestingUtility {
       { Compression.Algorithm.GZ }
     });
 
+  /** This is for unit tests parameterized with a single boolean. */
+  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
+      Arrays.asList(new Object[][] {
+          { new Boolean(false) },
+          { new Boolean(true) }
+      });
+
   /** Compression algorithms to use in testing */
   public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
       Compression.Algorithm.NONE, Compression.Algorithm.GZ
@@ -675,6 +684,25 @@ public class HBaseTestingUtility {
         new Configuration(getConfiguration()));
   }
 
+  public HTable createTable(byte[] tableName, byte[][] families,
+      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
+  throws IOException{
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    for (byte[] family : families) {
+      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
+          HColumnDescriptor.DEFAULT_COMPRESSION,
+          HColumnDescriptor.DEFAULT_IN_MEMORY,
+          HColumnDescriptor.DEFAULT_BLOCKCACHE,
+          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
+          HColumnDescriptor.DEFAULT_BLOOMFILTER,
+          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      desc.addFamily(hcd);
+    }
+    (new HBaseAdmin(getConfiguration())).createTable(desc, startKey,
+        endKey, numRegions);
+    return new HTable(getConfiguration(), tableName);
+  }
+
   /**
    * Create a table.
    * @param tableName
@@ -1608,4 +1636,82 @@ public class HBaseTestingUtility {
             HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
   }
 
+  /** Creates a pre-split table and fills it with pseudo-random puts/deletes; deterministic for a given table name. */
+  public HTable createRandomTable(String tableName,
+      final Collection<String> families,
+      final int maxVersions,
+      final int numColsPerRow,
+      final int numFlushes,
+      final int numRegions,
+      final int numRowsPerFlush)
+      throws IOException, InterruptedException {
+
+    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
+        " regions, " + numFlushes + " storefiles per region, " +
+        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
+        "\n");
+
+    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);  // fixed seed per table name -> reproducible data
+    final int numCF = families.size();
+    final byte[][] cfBytes = new byte[numCF][];
+    final byte[] tableNameBytes = Bytes.toBytes(tableName);
+
+    {
+      int cfIndex = 0;
+      for (String cf : families) {
+        cfBytes[cfIndex++] = Bytes.toBytes(cf);
+      }
+    }
+
+    final int actualStartKey = 0;
+    final int actualEndKey = Integer.MAX_VALUE;
+    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;  // even key-space partition across regions
+    final int splitStartKey = actualStartKey + keysPerRegion;
+    final int splitEndKey = actualEndKey - keysPerRegion;
+    final String keyFormat = "%08x";  // fixed-width hex keeps lexicographic order = numeric order
+    final HTable table = createTable(tableNameBytes, cfBytes,
+        maxVersions,
+        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
+        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
+        numRegions);
+    hbaseCluster.flushcache(HConstants.META_TABLE_NAME);  // flush .META. after table creation
+
+    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
+      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
+        final byte[] row = Bytes.toBytes(String.format(keyFormat,
+            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
+
+        Put put = new Put(row);
+        Delete del = new Delete(row);
+        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
+          final byte[] cf = cfBytes[rand.nextInt(numCF)];
+          final long ts = rand.nextInt();  // NOTE(review): nextInt() may be negative — confirm negative timestamps are intended here
+          final byte[] qual = Bytes.toBytes("col" + iCol);
+          if (rand.nextBoolean()) {
+            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
+                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
+                ts + "_random_" + rand.nextLong());
+            put.add(cf, qual, ts, value);
+          } else if (rand.nextDouble() < 0.8) {  // 80% of the delete cases
+            del.deleteColumn(cf, qual, ts);  // single-version delete, per Delete API
+          } else {
+            del.deleteColumns(cf, qual, ts);  // multi-version delete, per Delete API
+          }
+        }
+
+        if (!put.isEmpty()) {  // avoid issuing empty mutations
+          table.put(put);
+        }
+
+        if (!del.isEmpty()) {
+          table.delete(del);
+        }
+      }
+      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
+      table.flushCommits();
+      hbaseCluster.flushcache(tableNameBytes);  // force a new store file per iteration
+    }
+
+    return table;
+  }
 }

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java Tue Nov 15 01:26:05 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 
 public class CacheTestUtils {
 
@@ -275,6 +276,16 @@ public class CacheTestUtils {
       };
     }
 
+    @Override
+    public BlockType getBlockType() {
+      return BlockType.DATA;
+    }
+
+    @Override
+    public SchemaMetrics getSchemaMetrics() {
+      return SchemaMetrics.getUnknownInstanceForTest();
+    }
+
   }
 
   private static HFileBlockPair[] generateHFileBlocks(int blockSize,

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java Tue Nov 15 01:26:05 2011
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile
 import java.nio.ByteBuffer;
 
 import java.util.LinkedList;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 
 import junit.framework.TestCase;
 import org.apache.hadoop.hbase.SmallTests;
@@ -117,7 +118,7 @@ public class TestCachedBlockQueue extend
   {
     public CachedBlock(final long heapSize, String name, long accessTime) {
       super(name,
-          new Cacheable(){
+          new Cacheable() {
             @Override
             public long heapSize() {
               return ((int)(heapSize - CachedBlock.PER_BLOCK_OVERHEAD));
@@ -132,13 +133,22 @@ public class TestCachedBlockQueue extend
             public void serialize(ByteBuffer destination) {
             }
 
-
             @Override
             public CacheableDeserializer<Cacheable> getDeserializer() {
               // TODO Auto-generated method stub
               return null;
-            }},
-          accessTime,false);
+            }
+
+            @Override
+            public BlockType getBlockType() {
+              return BlockType.DATA;
+            }
+
+            @Override
+            public SchemaMetrics getSchemaMetrics() {
+              return SchemaMetrics.ALL_SCHEMA_METRICS;
+            }
+          }, accessTime, false);
     }
   }
 }

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java Tue Nov 15 01:26:05 2011
@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
 import org.junit.experimental.categories.Category;
@@ -54,12 +56,22 @@ public class TestHFile extends HBaseTest
   private final int minBlockSize = 512;
   private static String localFormatter = "%010d";
   private static CacheConfig cacheConf = null;
+  private Map<String, Long> startingMetrics;
 
+  @Override
   public void setUp() throws Exception {
-    super.setUp();
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
     ROOT_DIR = this.getUnitTestdir("TestHFile").toString();
+    super.setUp();
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    SchemaMetrics.validateMetricChanges(startingMetrics);  // compare against the setUp() snapshot — see SchemaMetrics
+  }
+
+
   /**
    * Test empty HFile.
    * Test all features work reasonably when hfile is empty of entries.

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java Tue Nov 15 01:26:05 2011
@@ -501,11 +501,17 @@ public class TestHFileBlock {
       ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
       HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
           true, -1);
-      long expected = ClassSize.align(ClassSize.estimateBase(HFileBlock.class,
-          true)
-          + ClassSize.estimateBase(buf.getClass(), true)
-          + HFileBlock.HEADER_SIZE + size);
-      assertEquals(expected, block.heapSize());
+      assertEquals(80, HFileBlock.BYTE_BUFFER_HEAP_SIZE);
+      long byteBufferExpectedSize =
+          ClassSize.align(ClassSize.estimateBase(buf.getClass(), true)
+              + HFileBlock.HEADER_SIZE + size);
+      long hfileBlockExpectedSize =
+          ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
+      long expected = hfileBlockExpectedSize + byteBufferExpectedSize;
+      assertEquals("Block data size: " + size + ", byte buffer expected " +
+          "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
+          "size: " + hfileBlockExpectedSize + ";", expected,
+          block.heapSize());
     }
   }
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java Tue Nov 15 01:26:05 2011
@@ -22,14 +22,17 @@ package org.apache.hadoop.hbase.io.hfile
 
 import java.io.IOException;
 import java.net.URL;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,13 +47,21 @@ public class TestHFileReaderV1 {
 
   private Configuration conf;
   private FileSystem fs;
+  private Map<String, Long> startingMetrics;
 
   private static final int N = 1000;
 
   @Before
   public void setUp() throws IOException {
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
     conf = TEST_UTIL.getConfiguration();
     fs = FileSystem.get(conf);
+    SchemaMetrics.configureGlobally(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    SchemaMetrics.validateMetricChanges(startingMetrics);  // compare against the setUp() snapshot — see SchemaMetrics
+  }
 
   @Test

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java Tue Nov 15 01:26:05 2011
@@ -20,14 +20,24 @@
 package org.apache.hadoop.hbase.io.hfile;
 
 import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.TestSchemaMetrics;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
-import junit.framework.TestCase;
 import org.junit.experimental.categories.Category;
+import static org.junit.Assert.*;
 
 /**
  * Tests the concurrent LruBlockCache.<p>
@@ -36,9 +46,32 @@ import org.junit.experimental.categories
  * evictions run when they're supposed to and do what they should,
  * and that cached blocks are accessible when expected to be.
  */
+@RunWith(Parameterized.class)
 @Category(MediumTests.class)
-public class TestLruBlockCache extends TestCase {
+public class TestLruBlockCache {
 
+  private Map<String, Long> startingMetrics;
+
+  public TestLruBlockCache(boolean useTableName) {  // parameterized: run with and without table names in metric keys
+    SchemaMetrics.setUseTableNameInTest(useTableName);
+  }
+
+  @Parameters
+  public static Collection<Object[]> parameters() {
+    return TestSchemaMetrics.parameters();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();  // baseline for the tearDown() validation
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    SchemaMetrics.validateMetricChanges(startingMetrics);  // compare against the setUp() snapshot
+  }
+
+  @Test
   public void testBackgroundEvictionThread() throws Exception {
 
     long maxSize = 100000;
@@ -65,6 +98,7 @@ public class TestLruBlockCache extends T
     assertEquals(cache.getEvictionCount(), 1);
   }
 
+  @Test
   public void testCacheSimple() throws Exception {
 
     long maxSize = 1000000;
@@ -124,6 +158,7 @@ public class TestLruBlockCache extends T
     t.join();
   }
 
+  @Test
   public void testCacheEvictionSimple() throws Exception {
 
     long maxSize = 100000;
@@ -164,6 +199,7 @@ public class TestLruBlockCache extends T
     }
   }
 
+  @Test
   public void testCacheEvictionTwoPriorities() throws Exception {
 
     long maxSize = 100000;
@@ -222,6 +258,7 @@ public class TestLruBlockCache extends T
     }
   }
 
+  @Test
   public void testCacheEvictionThreePriorities() throws Exception {
 
     long maxSize = 100000;
@@ -345,6 +382,7 @@ public class TestLruBlockCache extends T
   }
 
   // test scan resistance
+  @Test
   public void testScanResistance() throws Exception {
 
     long maxSize = 100000;
@@ -407,6 +445,7 @@ public class TestLruBlockCache extends T
   }
 
   // test setMaxSize
+  @Test
   public void testResizeBlockCache() throws Exception {
 
     long maxSize = 300000;
@@ -536,6 +575,16 @@ public class TestLruBlockCache extends T
     }
 
     @Override
+    public BlockType getBlockType() {
+      return BlockType.DATA;
+    }
+
+    @Override
+    public SchemaMetrics getSchemaMetrics() {
+      return SchemaMetrics.getUnknownInstanceForTest();
+    }
+
+    @Override
     public int getSerializedLength() {
       return 0;
     }

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Nov 15 01:26:05 2011
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.filter.Nu
 import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -78,6 +79,8 @@ import org.junit.experimental.categories
 public class TestHRegion extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestHRegion.class);
 
+  private static final String COLUMN_FAMILY = "MyCF";
+
   HRegion region = null;
   private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final String DIR = TEST_UTIL.getDataTestDir("TestHRegion").toString();
@@ -95,11 +98,14 @@ public class TestHRegion extends HBaseTe
   protected final byte [] row2 = Bytes.toBytes("rowB");
 
 
+  private Map<String, Long> startingMetrics;
+
   /**
    * @see org.apache.hadoop.hbase.HBaseTestCase#setUp()
    */
   @Override
   protected void setUp() throws Exception {
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
     super.setUp();
   }
 
@@ -107,6 +113,7 @@ public class TestHRegion extends HBaseTe
   protected void tearDown() throws Exception {
     super.tearDown();
     EnvironmentEdgeManagerTestHelper.reset();
+    SchemaMetrics.validateMetricChanges(startingMetrics);
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -326,10 +333,10 @@ public class TestHRegion extends HBaseTe
 
   public void testFamilyWithAndWithoutColon() throws Exception {
     byte [] b = Bytes.toBytes(getName());
-    byte [] cf = Bytes.toBytes("cf");
+    byte [] cf = Bytes.toBytes(COLUMN_FAMILY);
     initHRegion(b, getName(), cf);
     Put p = new Put(b);
-    byte [] cfwithcolon = Bytes.toBytes("cf:");
+    byte [] cfwithcolon = Bytes.toBytes(COLUMN_FAMILY + ":");
     p.add(cfwithcolon, cfwithcolon, cfwithcolon);
     boolean exception = false;
     try {
@@ -343,7 +350,7 @@ public class TestHRegion extends HBaseTe
   @SuppressWarnings("unchecked")
   public void testBatchPut() throws Exception {
     byte[] b = Bytes.toBytes(getName());
-    byte[] cf = Bytes.toBytes("cf");
+    byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
     byte[] qual = Bytes.toBytes("qual");
     byte[] val = Bytes.toBytes("val");
     initHRegion(b, getName(), cf);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java Tue Nov 15 01:26:05 2011
@@ -39,9 +39,13 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -58,11 +62,13 @@ public class TestMultiColumnScanner {
   private static final Log LOG = LogFactory.getLog(TestMultiColumnScanner.class);
 
   private static final String TABLE_NAME = "TestMultiColumnScanner";
+  static final int MAX_VERSIONS = 50;
 
-  // These fields are used in other unit tests
+  // These fields are used in TestScanWithBloomError
   static final String FAMILY = "CF";
   static final byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY);
-  static final int MAX_VERSIONS = 50;
+
+  private SchemaMetrics schemaMetrics;
 
   /**
    * The size of the column qualifier set used. Increasing this parameter
@@ -101,6 +107,9 @@ public class TestMultiColumnScanner {
   private Compression.Algorithm comprAlgo;
   private StoreFile.BloomType bloomType;
 
+  private long lastBlocksRead;
+  private long lastCacheHits;
+
   // Some static sanity-checking.
   static {
     assertTrue(BIG_LONG > 0.9 * Long.MAX_VALUE); // Guard against typos.
@@ -110,6 +119,13 @@ public class TestMultiColumnScanner {
       assertTrue(TIMESTAMPS[i] < TIMESTAMPS[i + 1]);
   }
 
+  @Before
+  public void setUp() {
+    SchemaMetrics.configureGlobally(TEST_UTIL.getConfiguration());
+    schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, FAMILY);  // per-table/CF metrics handle used by the helpers below
+  }
+
+
   @Parameters
   public static final Collection<Object[]> parameters() {
     return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
@@ -121,6 +137,39 @@ public class TestMultiColumnScanner {
     this.bloomType = bloomType;
   }
 
+  private long getBlocksRead() {  // current cumulative READ_COUNT across all block categories for this table/CF
+    return HRegion.getNumericMetric(schemaMetrics.getBlockMetricName(
+        BlockType.BlockCategory.ALL_CATEGORIES, false,
+        BlockMetricType.READ_COUNT));
+  }
+
+  private long getCacheHits() {  // current cumulative CACHE_HIT count across all block categories for this table/CF
+    return HRegion.getNumericMetric(schemaMetrics.getBlockMetricName(
+        BlockType.BlockCategory.ALL_CATEGORIES, false,
+        BlockMetricType.CACHE_HIT));
+  }
+
+  private void saveBlockStats() {  // checkpoint counters so later deltas measure only subsequent work
+    lastBlocksRead = getBlocksRead();
+    lastCacheHits = getCacheHits();
+  }
+
+  private void showBlockStats() {  // log block-read/cache-hit deltas since the last saveBlockStats()
+    long blocksRead = blocksReadDelta();
+    long cacheHits = cacheHitsDelta();
+    LOG.info("Compression: " + comprAlgo + ", Bloom type: "
+        + bloomType + ", blocks read: " + blocksRead + ", block cache hits: "
+        + cacheHits + ", misses: " + (blocksRead - cacheHits));
+  }
+
+  private long cacheHitsDelta() {  // cache hits since the last saveBlockStats()
+    return getCacheHits() - lastCacheHits;
+  }
+
+  private long blocksReadDelta() {  // blocks read since the last saveBlockStats()
+    return getBlocksRead() - lastBlocksRead;
+  }
+
   @Test
   public void testMultiColumnScanner() throws IOException {
     HRegion region = createRegion(TABLE_NAME, comprAlgo, bloomType,

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java?rev=1201994&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java Tue Nov 15 01:26:05 2011
@@ -0,0 +1,121 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
+    StoreMetricType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test metrics incremented on region server operations.
+ */
+public class TestRegionServerMetrics {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestRegionServerMetrics.class.getName());
+
+  private final static String TABLE_NAME =
+      TestRegionServerMetrics.class.getSimpleName() + "Table";
+  private static final String[] FAMILIES = { "cf1", "cf2", "anotherCF" };  // constant: was a mutable instance field named like one
+  private static final int MAX_VERSIONS = 1;
+  private static final int NUM_COLS_PER_ROW = 15;
+  private static final int NUM_FLUSHES = 3;
+  private static final int NUM_REGIONS = 4;
+
+  private static final SchemaMetrics ALL_METRICS =
+      SchemaMetrics.ALL_SCHEMA_METRICS;
+
+  private static final HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+
+  private Map<String, Long> startingMetrics;  // snapshot taken in setUp(), validated in tearDown()
+
+  private static final int META_AND_ROOT = 2;  // the -ROOT- and .META. regions hosted alongside the test table
+
+  @Before
+  public void setUp() throws Exception {
+    SchemaMetrics.setUseTableNameInTest(true);  // include table names in metric keys
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+    SchemaMetrics.validateMetricChanges(startingMetrics);  // compare against the setUp() snapshot
+  }
+
+  private void assertStoreMetricEquals(long expected,
+      SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
+    final String storeMetricName =
+        schemaMetrics.getStoreMetricName(storeMetricType);
+    Long startValue = startingMetrics.get(storeMetricName);
+    assertEquals("Invalid value for store metric " + storeMetricName
+        + " (type " + storeMetricType + ")", expected,
+        HRegion.getNumericMetric(storeMetricName)
+            - (startValue != null ? startValue : 0));  // assert on the delta since setUp(), not the absolute value
+  }
+
+  @Test
+  public void testMultipleRegions() throws IOException, InterruptedException {
+
+    TEST_UTIL.createRandomTable(
+        TABLE_NAME,
+        Arrays.asList(FAMILIES),
+        MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
+
+    final HRegionServer rs =
+        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+
+    assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());
+
+    rs.doMetrics();  // trigger a metrics computation pass before reading values
+    for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
+        Bytes.toBytes(TABLE_NAME))) {
+      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
+        LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
+            Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
+            ": " + storeEntry.getValue().getStorefiles());
+      }
+    }
+
+    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
+        + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);
+
+    for (String cf : FAMILIES) {
+      SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
+      assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS,
+          schemaMetrics, StoreMetricType.STORE_FILE_COUNT);
+    }
+  }
+
+}

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1201994&r1=1201993&r2=1201994&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Nov 15 01:26:05 2011
@@ -26,6 +26,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
 import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -59,13 +62,21 @@ public class TestStoreFile extends HBase
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
   private CacheConfig cacheConf =  new CacheConfig(conf);
   private String ROOT_DIR;
+  private Map<String, Long> startingMetrics;
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
     ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    SchemaMetrics.validateMetricChanges(startingMetrics);
+  }
+
   /**
    * Write a file and then assert that we can read from top and bottom halves
    * using two HalfMapFiles.

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java?rev=1201994&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaConfigured.java Tue Nov 15 01:26:05 2011
@@ -0,0 +1,222 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import static org.junit.Assert.*;
+
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONStringer;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestSchemaConfigured {
+  private static final Log LOG = LogFactory.getLog(TestSchemaConfigured.class);
+  private final String TABLE_NAME = "myTable";
+  private final String CF_NAME = "myColumnFamily";
+
+  /** An HFile path in a region's ".tmp" directory, as written on compaction. */
+  private static final Path TMP_HFILE_PATH = new Path(
+      "/hbase/myTable/myRegion/" + HRegion.REGION_TEMP_SUBDIR + "/hfilename");
+
+  /** Heap size of a configured instance must match the class-size estimate. */
+  @Test
+  public void testHeapSize() {
+    SchemaConfigured sc = new SchemaConfigured(null, TABLE_NAME, CF_NAME);
+    assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true),
+        sc.heapSize());
+  }
+
+  /** Test if toString generates real JSON */
+  @Test
+  public void testToString() throws JSONException {
+    SchemaConfigured sc = new SchemaConfigured(null, TABLE_NAME, CF_NAME);
+    JSONStringer json = new JSONStringer();
+    json.object();
+    json.key("tableName");
+    json.value(TABLE_NAME);
+    json.key("cfName");
+    json.value(CF_NAME);
+    json.endObject();
+    assertEquals(json.toString(), sc.schemaConfAsJSON());
+  }
+
+  /** Don't allow requesting metrics before setting table/CF name */
+  @Test
+  public void testDelayedInitialization() {
+    SchemaConfigured unconfigured = new SchemaConfigured();
+    try {
+      unconfigured.getSchemaMetrics();
+      fail(IllegalStateException.class.getSimpleName() + " expected");
+    } catch (IllegalStateException ex) {
+      assertTrue("Unexpected exception message: " + ex.getMessage(),
+          Pattern.matches(".* metrics requested before .* initialization.*",
+          ex.getMessage()));
+      LOG.debug("Expected exception: " + ex.getMessage());
+    }
+
+    SchemaMetrics.setUseTableNameInTest(false);
+    SchemaConfigured other = new SchemaConfigured(null, TABLE_NAME, CF_NAME);
+    other.passSchemaMetricsTo(unconfigured);
+    unconfigured.getSchemaMetrics();  // now this should succeed
+  }
+
+  /** Don't allow setting table/CF name twice */
+  @Test
+  public void testInitializingTwice() {
+    Configuration conf = HBaseConfiguration.create();
+    // i == 0: identical configuration (allowed); i in 1..3: the table name
+    // and/or the CF name differ, which must be rejected.
+    for (int i = 0; i < 4; ++i) {
+      SchemaConfigured sc = new SchemaConfigured(conf, TABLE_NAME, CF_NAME);
+      SchemaConfigured target =
+          new SchemaConfigured(conf, TABLE_NAME + (i % 2 == 1 ? "1" : ""),
+              CF_NAME + ((i & 2) != 0 ? "1" : ""));
+      if (i == 0) {
+        sc.passSchemaMetricsTo(target);  // No exception expected.
+        continue;
+      }
+
+      String testDesc =
+          "Trying to re-configure " + target.schemaConfAsJSON() + " with "
+              + sc.schemaConfAsJSON();
+      try {
+        sc.passSchemaMetricsTo(target);
+        fail(IllegalArgumentException.class.getSimpleName() + " expected");
+      } catch (IllegalArgumentException ex) {
+        final String errorMsg = testDesc + ". Unexpected exception message: " +
+            ex.getMessage();
+        final String exceptionRegex = "Trying to change table .* CF .*";
+        assertTrue(errorMsg, Pattern.matches(exceptionRegex, ex.getMessage()));
+        LOG.debug("Expected exception: " + ex.getMessage());
+      }
+    }
+  }
+
+  /** A completely unconfigured source cannot pass its metrics to anything. */
+  @Test(expected=IllegalStateException.class)
+  public void testConfigureWithUnconfigured() {
+    SchemaConfigured unconfigured = new SchemaConfigured();
+    SchemaConfigured target = new SchemaConfigured();
+    unconfigured.passSchemaMetricsTo(target);
+  }
+
+  // NOTE(review): this method is not annotated @Test, so it never ran. The
+  // original body also compared table names against getColumnFamilyName()
+  // and asserted on target2 where target3 was clearly intended; those
+  // getter/variable typos are fixed below. Confirm the intended semantics of
+  // passSchemaMetricsTo for a target with a conflicting table name (see
+  // testTmpPathButInvalidTable) before enabling this with @Test.
+  public void testConfigurePartiallyDefined() {
+    final SchemaConfigured sc = new SchemaConfigured(null, "t1", "cf1");
+    final SchemaConfigured target1 = new SchemaConfigured(null, "t2", null);
+    sc.passSchemaMetricsTo(target1);
+    assertEquals("t2", target1.getTableName());
+    assertEquals("cf1", target1.getColumnFamilyName());
+
+    final SchemaConfigured target2 = new SchemaConfigured(null, null, "cf2");
+    sc.passSchemaMetricsTo(target2);
+    assertEquals("t1", target2.getTableName());
+    assertEquals("cf2", target2.getColumnFamilyName());
+
+    final SchemaConfigured target3 = new SchemaConfigured(null, null, null);
+    sc.passSchemaMetricsTo(target3);
+    assertEquals("t1", target3.getTableName());
+    assertEquals("cf1", target3.getColumnFamilyName());
+  }
+
+  /** Passing metrics from a conflicting fully-defined config is rejected. */
+  @Test(expected=IllegalArgumentException.class)
+  public void testConflictingConf() {
+    SchemaConfigured sc = new SchemaConfigured(null, "t1", "cf1");
+    SchemaConfigured target = new SchemaConfigured(null, "t2", "cf1");
+    target.passSchemaMetricsTo(sc);
+  }
+
+  /**
+   * When the "column family" deduced from the path is ".tmp" (this happens
+   * for files written on compaction) we allow re-setting the CF to another
+   * value.
+   */
+  @Test
+  public void testTmpPath() {
+    SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF");
+    SchemaConfigured target = new SchemaConfigured(TMP_HFILE_PATH);
+    sc.passSchemaMetricsTo(target);
+  }
+
+  /**
+   * Even if CF is initially undefined (".tmp"), we don't allow to change
+   * table name.
+   */
+  @Test(expected=IllegalArgumentException.class)
+  public void testTmpPathButInvalidTable() {
+    SchemaConfigured sc = new SchemaConfigured(null, "anotherTable", "myCF");
+    SchemaConfigured target = new SchemaConfigured(TMP_HFILE_PATH);
+    sc.passSchemaMetricsTo(target);
+  }
+
+  /** The schemaConfigurationChanged hook must fire when metrics are passed. */
+  @Test
+  public void testSchemaConfigurationHook() {
+    SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF");
+    final StringBuilder newCF = new StringBuilder();
+    final StringBuilder newTable = new StringBuilder();
+    SchemaConfigured target = new SchemaConfigured() {
+      @Override
+      protected void schemaConfigurationChanged() {
+        newCF.append(getColumnFamilyName());
+        newTable.append(getTableName());
+      }
+    };
+    sc.passSchemaMetricsTo(target);
+    assertEquals("myTable", newTable.toString());
+    assertEquals("myCF", newCF.toString());
+  }
+
+  /** After a reset, a previously configured object can be re-configured. */
+  @Test
+  public void testResetSchemaMetricsConf() {
+    SchemaConfigured target = new SchemaConfigured(null, "t1", "cf1");
+    SchemaConfigured.resetSchemaMetricsConf(target);
+    new SchemaConfigured(null, "t2", "cf2").passSchemaMetricsTo(target);
+    assertEquals("t2", target.getTableName());
+    assertEquals("cf2", target.getColumnFamilyName());
+  }
+
+  /** Paths with too few components yield UNKNOWN table/CF; full paths parse. */
+  @Test
+  public void testPathTooShort() {
+    // This has too few path components (four, the first one is empty).
+    SchemaConfigured sc1 = new SchemaConfigured(new Path("/a/b/c/d"));
+    assertEquals(SchemaMetrics.UNKNOWN, sc1.getTableName());
+    assertEquals(SchemaMetrics.UNKNOWN, sc1.getColumnFamilyName());
+
+    SchemaConfigured sc2 = new SchemaConfigured(new Path("a/b/c/d"));
+    assertEquals(SchemaMetrics.UNKNOWN, sc2.getTableName());
+    assertEquals(SchemaMetrics.UNKNOWN, sc2.getColumnFamilyName());
+
+    // Five components, absolute or relative, are enough to deduce the schema.
+    SchemaConfigured sc3 = new SchemaConfigured(
+        new Path("/hbase/tableName/regionId/cfName/hfileName"));
+    assertEquals("tableName", sc3.getTableName());
+    assertEquals("cfName", sc3.getColumnFamilyName());
+
+    SchemaConfigured sc4 = new SchemaConfigured(
+        new Path("hbase/tableName/regionId/cfName/hfileName"));
+    assertEquals("tableName", sc4.getTableName());
+    assertEquals("cfName", sc4.getColumnFamilyName());
+  }
+
+}

Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java?rev=1201994&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java (added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestSchemaMetrics.java Tue Nov 15 01:26:05 2011
@@ -0,0 +1,216 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.metrics;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+
+import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
+    BOOL_VALUES;
+import static org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
+    BlockMetricType;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests per-(table, column family) schema metric naming and verifies that
+ * metric updates stay self-consistent. Parameterized on whether the table
+ * name is included in metric keys.
+ */
+@Category(MediumTests.class)
+@RunWith(Parameterized.class)
+public class TestSchemaMetrics {
+
+  private final String TABLE_NAME = "myTable";
+  private final String CF_NAME = "myColumnFamily";
+
+  // Whether metric keys are prefixed with "tbl.<tableName>." in this run.
+  private final boolean useTableName;
+  // Snapshot of all metric values taken before each test (see setUp).
+  private Map<String, Long> startingMetrics;
+
+  @Parameters
+  public static Collection<Object[]> parameters() {
+    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
+  }
+
+  public TestSchemaMetrics(boolean useTableName) {
+    this.useTableName = useTableName;
+    SchemaMetrics.setUseTableNameInTest(useTableName);
+  }
+
+  @Before
+  public void setUp() {
+    startingMetrics = SchemaMetrics.getMetricsSnapshot();
+  };
+
+  /**
+   * Checks the exact metric-key strings produced for every combination of
+   * block category, compaction flag, and metric type, for both a specific
+   * (table, CF) pair and the ALL_SCHEMA_METRICS aggregate.
+   */
+  @Test
+  public void testNaming() {
+    final String metricPrefix = (useTableName ? "tbl." +
+        TABLE_NAME + "." : "") + "cf." + CF_NAME + ".";
+    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME,
+        CF_NAME);
+    SchemaMetrics ALL_CF_METRICS = SchemaMetrics.ALL_SCHEMA_METRICS;
+
+    // fsReadTimeMetric
+    assertEquals(metricPrefix + "fsRead", schemaMetrics.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.READ_TIME));
+
+    // compactionReadTimeMetric
+    assertEquals(metricPrefix + "compactionRead",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.READ_TIME));
+
+    // fsBlockReadCntMetric
+    assertEquals(metricPrefix + "fsBlockReadCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.READ_COUNT));
+
+    // fsBlockReadCacheHitCntMetric
+    assertEquals(metricPrefix + "fsBlockReadCacheHitCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.CACHE_HIT));
+
+    // fsBlockReadCacheMissCntMetric
+    assertEquals(metricPrefix + "fsBlockReadCacheMissCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.CACHE_MISS));
+
+    // compactionBlockReadCntMetric
+    assertEquals(metricPrefix + "compactionBlockReadCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.READ_COUNT));
+
+    // compactionBlockReadCacheHitCntMetric
+    assertEquals(metricPrefix + "compactionBlockReadCacheHitCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.CACHE_HIT));
+
+    // compactionBlockReadCacheMissCntMetric
+    assertEquals(metricPrefix + "compactionBlockReadCacheMissCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+            BlockMetricType.CACHE_MISS));
+
+    // fsMetaBlockReadCntMetric
+    assertEquals("fsMetaBlockReadCnt", ALL_CF_METRICS.getBlockMetricName(
+        BlockCategory.META, false, BlockMetricType.READ_COUNT));
+
+    // fsMetaBlockReadCacheHitCntMetric
+    assertEquals("fsMetaBlockReadCacheHitCnt",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false,
+            BlockMetricType.CACHE_HIT));
+
+    // fsMetaBlockReadCacheMissCntMetric
+    assertEquals("fsMetaBlockReadCacheMissCnt",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.META, false,
+            BlockMetricType.CACHE_MISS));
+
+    // Per-(column family, block type) statistics.
+    assertEquals(metricPrefix + "bt.Index.fsBlockReadCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false,
+            BlockMetricType.READ_COUNT));
+
+    assertEquals(metricPrefix + "bt.Data.compactionBlockReadCacheHitCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.DATA, true,
+            BlockMetricType.CACHE_HIT));
+
+    // A special case for Meta blocks
+    assertEquals(metricPrefix + "compactionMetaBlockReadCacheHitCnt",
+        schemaMetrics.getBlockMetricName(BlockCategory.META, true,
+            BlockMetricType.CACHE_HIT));
+
+    // Cache metrics
+    assertEquals(metricPrefix + "blockCacheSize",
+        schemaMetrics.getBlockMetricName(BlockCategory.ALL_CATEGORIES, false,
+            BlockMetricType.CACHE_SIZE));
+
+    assertEquals(metricPrefix + "bt.Index.blockCacheNumEvicted",
+        schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false,
+            BlockMetricType.EVICTED));
+
+    assertEquals("bt.Data.blockCacheNumCached",
+        ALL_CF_METRICS.getBlockMetricName(BlockCategory.DATA, false,
+            BlockMetricType.CACHED));
+
+    assertEquals("blockCacheNumCached", ALL_CF_METRICS.getBlockMetricName(
+        BlockCategory.ALL_CATEGORIES, false, BlockMetricType.CACHED));
+
+    // "Non-compaction aware" metrics
+    try {
+      ALL_CF_METRICS.getBlockMetricName(BlockCategory.ALL_CATEGORIES, true,
+          BlockMetricType.CACHE_SIZE);
+      fail("Exception expected");
+    } catch (IllegalArgumentException ex) {
+    }
+
+    // Bloom metrics
+    assertEquals("keyMaybeInBloomCnt", ALL_CF_METRICS.getBloomMetricName(true));
+    assertEquals(metricPrefix + "keyNotInBloomCnt",
+        schemaMetrics.getBloomMetricName(false));
+
+    schemaMetrics.printMetricNames();
+  }
+
+  // Helper (intentionally not annotated @Test): asserts that all metric
+  // changes since the setUp snapshot are consistent with each other.
+  public void checkMetrics() {
+    SchemaMetrics.validateMetricChanges(startingMetrics);
+  }
+
+  /**
+   * Applies bloom, cache-hit/miss, and cache put/evict updates across several
+   * (table, CF) pairs and validates metric consistency after each update.
+   */
+  @Test
+  public void testIncrements() {
+    Random rand = new Random(23982737L);
+    for (int i = 1; i <= 3; ++i) {
+      final String tableName = "table" + i;
+      for (int j = 1; j <= 3; ++j) {
+        final String cfName = "cf" + j;
+        SchemaMetrics sm = SchemaMetrics.getInstance(tableName, cfName);
+        for (boolean isInBloom : BOOL_VALUES) {
+          sm.updateBloomMetrics(isInBloom);
+          checkMetrics();
+        }
+
+        for (BlockCategory blockCat : BlockType.BlockCategory.values()) {
+          // Skip the aggregate pseudo-category; only concrete categories are
+          // updated directly here.
+          if (blockCat == BlockCategory.ALL_CATEGORIES) {
+            continue;
+          }
+
+          for (boolean isCompaction : BOOL_VALUES) {
+            sm.updateOnCacheHit(blockCat, isCompaction);
+            checkMetrics();
+            sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt());
+            checkMetrics();
+          }
+
+          // NOTE(review): unlike the hit/miss updates above, there is no
+          // checkMetrics() call after put/evict updates — confirm intentional.
+          for (boolean isEviction : BOOL_VALUES) {
+            sm.updateOnCachePutOrEvict(blockCat, (isEviction ? -1 : 1)
+                * rand.nextInt(1024 * 1024), isEviction);
+          }
+        }
+      }
+    }
+  }
+
+}



Mime
View raw message