hbase-commits mailing list archives

From: st...@apache.org
Subject: [1/2] HBASE-11366 Backport HBASE-4089 (block cache contents report UI) to 0.98
Date: Wed, 18 Jun 2014 03:29:44 GMT
Repository: hbase
Updated Branches:
  refs/heads/0.98 1c4cdefe2 -> da94cd398


http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
new file mode 100644
index 0000000..f61aba6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
@@ -0,0 +1,109 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import com.google.common.collect.MinMaxPriorityQueue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.HeapSize;
+
+/**
+ * A memory-bound queue that will grow until an element brings the
+ * total size >= maxSize.  From then on, only entries that sort larger
+ * than the smallest current entry will be inserted/replaced.
+ *
+ * <p>Use this when you want to find the largest elements (according to their
+ * ordering, not their heap size) that consume as close to the specified
+ * maxSize as possible.  Default behavior is to grow just above rather than
+ * just below the specified max.
+ *
+ * <p>Objects used in this queue must implement {@link HeapSize} as well as
+ * {@link Comparable}.
+ */
+@InterfaceAudience.Private
+public class LruCachedBlockQueue implements HeapSize {
+
+  private MinMaxPriorityQueue<LruCachedBlock> queue;
+
+  private long heapSize;
+  private long maxSize;
+
+  /**
+   * @param maxSize the target total heap size of elements in the queue
+   * @param blockSize expected average size of blocks
+   */
+  public LruCachedBlockQueue(long maxSize, long blockSize) {
+    int initialSize = (int)(maxSize / blockSize);
+    if(initialSize == 0) initialSize++;
+    queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
+    heapSize = 0;
+    this.maxSize = maxSize;
+  }
+
+  /**
+   * Attempt to add the specified cached block to this queue.
+   *
+   * <p>If the queue is smaller than the max size, or if the specified element
+   * sorts larger than the smallest element in the queue, the element will be
+   * added to the queue.  Otherwise, this call has no side effect.
+   * @param cb block to try to add to the queue
+   */
+  public void add(LruCachedBlock cb) {
+    if(heapSize < maxSize) {
+      queue.add(cb);
+      heapSize += cb.heapSize();
+    } else {
+      LruCachedBlock head = queue.peek();
+      if(cb.compareTo(head) > 0) {
+        heapSize += cb.heapSize();
+        heapSize -= head.heapSize();
+        if(heapSize > maxSize) {
+          queue.poll();
+        } else {
+          heapSize += head.heapSize();
+        }
+        queue.add(cb);
+      }
+    }
+  }
+
+  /**
+   * @return The least element in this queue, or {@code null} if the queue is
+   * empty.
+   */
+  public LruCachedBlock poll() {
+    return queue.poll();
+  }
+
+  /**
+   * @return The greatest element in this queue, or {@code null} if the queue is
+   * empty.
+   */
+  public LruCachedBlock pollLast() {
+    return queue.pollLast();
+  }
+
+  /**
+   * Total size of all elements in this queue.
+   * @return size of all elements currently in queue, in bytes
+   */
+  public long heapSize() {
+    return heapSize;
+  }
+}
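
The add() bookkeeping above reads most easily as a generic algorithm. The
following is a minimal, self-contained analogue of it (the BoundedTopQueue
class and weightOf() method are hypothetical, and java.util.PriorityQueue
stands in for Guava's MinMaxPriorityQueue; this is a sketch, not HBase API):

    import java.util.PriorityQueue;

    // Keeps the largest elements, by their ordering, whose combined weight
    // stays as close to maxWeight as possible (growing just above it).
    abstract class BoundedTopQueue<T extends Comparable<T>> {
      private final PriorityQueue<T> queue = new PriorityQueue<T>(); // least at head
      private final long maxWeight;
      private long weight;

      BoundedTopQueue(long maxWeight) {
        this.maxWeight = maxWeight;
      }

      /** Weight of one element, e.g. its heap size in bytes. */
      protected abstract long weightOf(T e);

      void add(T e) {
        if (weight < maxWeight) {        // still filling: accept anything
          queue.add(e);
          weight += weightOf(e);
          return;
        }
        T head = queue.peek();           // current smallest element
        if (e.compareTo(head) <= 0) {
          return;                        // too small to displace anything
        }
        // Choose whichever of "drop head" / "keep head" lands nearer maxWeight
        // from above, mirroring the heapSize arithmetic in the patch.
        weight += weightOf(e) - weightOf(head);
        if (weight > maxWeight) {
          queue.poll();                  // drop the head
        } else {
          weight += weightOf(head);      // keep the head
        }
        queue.add(e);
      }
    }

In the real class, LruBlockCache's eviction path feeds in blocks ordered by
access time; what remains in the queue is the set of eviction candidates whose
total heap size approximates the number of bytes to free.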

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
deleted file mode 100644
index 57ffeb8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.SoftReference;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
-
-/**
- * Simple one RFile soft reference cache.
- */
-@InterfaceAudience.Private
-public class SimpleBlockCache implements BlockCache {
-  private static class Ref extends SoftReference<Cacheable> {
-    public BlockCacheKey blockId;
-    public Ref(BlockCacheKey blockId, Cacheable block, ReferenceQueue q) {
-      super(block, q);
-      this.blockId = blockId;
-    }
-  }
-  private Map<BlockCacheKey,Ref> cache =
-    new HashMap<BlockCacheKey,Ref>();
-
-  private ReferenceQueue q = new ReferenceQueue();
-  public int dumps = 0;
-
-  /**
-   * Constructor
-   */
-  public SimpleBlockCache() {
-    super();
-  }
-
-  void processQueue() {
-    Ref r;
-    while ( (r = (Ref)q.poll()) != null) {
-      cache.remove(r.blockId);
-      dumps++;
-    }
-  }
-
-  /**
-   * @return the size
-   */
-  public synchronized long size() {
-    processQueue();
-    return cache.size();
-  }
-
-  public synchronized Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
-      boolean updateCacheMetrics) {
-    processQueue(); // clear out some crap.
-    Ref ref = cache.get(cacheKey);
-    if (ref == null)
-      return null;
-    return ref.get();
-  }
-
-  public synchronized void cacheBlock(BlockCacheKey cacheKey, Cacheable block) {
-    cache.put(cacheKey, new Ref(cacheKey, block, q));
-  }
-
-  public synchronized void cacheBlock(BlockCacheKey cacheKey, Cacheable block,
-      boolean inMemory) {
-    cache.put(cacheKey, new Ref(cacheKey, block, q));
-  }
-
-  @Override
-  public boolean evictBlock(BlockCacheKey cacheKey) {
-    return cache.remove(cacheKey) != null;
-  }
-
-  public void shutdown() {
-    // noop
-  }
-
-  @Override
-  public CacheStats getStats() {
-    // TODO: implement this if we ever actually use this block cache
-    return null;
-  }
-
-  @Override
-  public long getFreeSize() {
-    // TODO: implement this if we ever actually use this block cache
-    return 0;
-  }
-
-  @Override
-  public long getCurrentSize() {
-    // TODO: implement this if we ever actually use this block cache
-    return 0;
-  }
-
-  @Override
-  public long getEvictedCount() {
-    // TODO: implement this if we ever actually use this block cache
-    return 0;
-  }
-
-  @Override
-  public int evictBlocksByHfileName(String string) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public long getBlockCount() {
-    // TODO: implement this if we ever actually use this block cache
-    return 0;
-  }
-
-}
-
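
The deleted SimpleBlockCache above is a textbook instance of the SoftReference
plus ReferenceQueue idiom: the GC clears soft references under memory pressure
and enqueues them, and the cache lazily removes the corresponding map entries.
A minimal generic sketch of the same idiom (the SoftCache class below is
hypothetical, not an HBase class):

    import java.lang.ref.ReferenceQueue;
    import java.lang.ref.SoftReference;
    import java.util.HashMap;
    import java.util.Map;

    class SoftCache<K, V> {
      // Subclass SoftReference so the key travels with the reference; once
      // the referent is collected, the stale map entry can be removed by key.
      private static final class Ref<K, V> extends SoftReference<V> {
        final K key;
        Ref(K key, V value, ReferenceQueue<V> q) {
          super(value, q);
          this.key = key;
        }
      }

      private final Map<K, Ref<K, V>> map = new HashMap<K, Ref<K, V>>();
      private final ReferenceQueue<V> queue = new ReferenceQueue<V>();

      public synchronized void put(K key, V value) {
        expunge();
        map.put(key, new Ref<K, V>(key, value, queue));
      }

      public synchronized V get(K key) {
        expunge();
        Ref<K, V> ref = map.get(key);
        return ref == null ? null : ref.get();
      }

      // Drain references the GC has enqueued and drop their stale map entries.
      @SuppressWarnings("unchecked")
      private void expunge() {
        Ref<K, V> ref;
        while ((ref = (Ref<K, V>) queue.poll()) != null) {
          map.remove(ref.key);
        }
      }
    }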

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 39ef523..b9930bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 
 /**
  * This class is used to allocate a block with specified size and free the block
@@ -42,10 +43,12 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry;
  * This class is not thread safe.
  */
 @InterfaceAudience.Private
+@JsonIgnoreProperties({"indexStatistics", "freeSize", "usedSize"})
 public final class BucketAllocator {
   static final Log LOG = LogFactory.getLog(BucketAllocator.class);
 
-  final private static class Bucket {
+  @JsonIgnoreProperties({"completelyFree", "uninstantiated"})
+  public final static class Bucket {
     private long baseOffset;
     private int itemAllocationSize, sizeIndex;
     private int itemCount;
@@ -77,7 +80,7 @@ public final class BucketAllocator {
       return sizeIndex;
     }
 
-    public int itemAllocationSize() {
+    public int getItemAllocationSize() {
       return itemAllocationSize;
     }
 
@@ -97,15 +100,15 @@ public final class BucketAllocator {
       return usedCount;
     }
 
-    public int freeBytes() {
+    public int getFreeBytes() {
       return freeCount * itemAllocationSize;
     }
 
-    public int usedBytes() {
+    public int getUsedBytes() {
       return usedCount * itemAllocationSize;
     }
 
-    public long baseOffset() {
+    public long getBaseOffset() {
       return baseOffset;
     }
 
@@ -372,19 +375,18 @@ public final class BucketAllocator {
       }
       realCacheSize.addAndGet(foundLen);
       buckets[bucketNo].addAllocation(foundOffset);
-      usedSize += buckets[bucketNo].itemAllocationSize();
+      usedSize += buckets[bucketNo].getItemAllocationSize();
       bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
     }
   }
 
-  public String getInfo() {
+  public String toString() {
     StringBuilder sb = new StringBuilder(1024);
     for (int i = 0; i < buckets.length; ++i) {
       Bucket b = buckets[i];
-      sb.append("    Bucket ").append(i).append(": ").append(b.itemAllocationSize());
-      sb.append(" freeCount=").append(b.freeCount()).append(" used=")
-          .append(b.usedCount());
-      sb.append('\n');
+      if (i > 0) sb.append(", ");
+      sb.append("bucket.").append(i).append(": size=").append(b.getItemAllocationSize());
+      sb.append(", freeCount=").append(b.freeCount()).append(", used=").append(b.usedCount());
     }
     return sb.toString();
   }
@@ -441,8 +443,8 @@ public final class BucketAllocator {
     assert bucketNo >= 0 && bucketNo < buckets.length;
     Bucket targetBucket = buckets[bucketNo];
     bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset);
-    usedSize -= targetBucket.itemAllocationSize();
-    return targetBucket.itemAllocationSize();
+    usedSize -= targetBucket.getItemAllocationSize();
+    return targetBucket.getItemAllocationSize();
   }
 
   public int sizeIndexOfAllocation(long offset) {
@@ -456,7 +458,7 @@ public final class BucketAllocator {
     int bucketNo = (int) (offset / bucketCapacity);
     assert bucketNo >= 0 && bucketNo < buckets.length;
     Bucket targetBucket = buckets[bucketNo];
-    return targetBucket.itemAllocationSize();
+    return targetBucket.getItemAllocationSize();
   }
 
   static class IndexStatistics {
@@ -506,6 +508,10 @@ public final class BucketAllocator {
     }
   }
 
+  public Bucket [] getBuckets() {
+    return this.buckets;
+  }
+
   public void dumpToLog() {
     logStatistics();
     StringBuilder sb = new StringBuilder();
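
The accessor renames above (itemAllocationSize() to getItemAllocationSize() and
so on) plus the new @JsonIgnoreProperties annotations let Jackson serialize
BucketAllocator and its Buckets for the new report UI: Jackson 1.x discovers
properties from getX() bean methods, and the annotation suppresses properties
that should not be rendered. A small illustrative sketch (the BucketInfo bean
below is hypothetical, not from this patch):

    import org.codehaus.jackson.annotate.JsonIgnoreProperties;
    import org.codehaus.jackson.map.ObjectMapper;

    @JsonIgnoreProperties({"secret"})
    class BucketInfo {
      public int getItemAllocationSize() { return 4096; } // -> "itemAllocationSize"
      public long getBaseOffset() { return 0L; }          // -> "baseOffset"
      public String getSecret() { return "hidden"; }      // suppressed
    }

    public class JsonDemo {
      public static void main(String[] args) throws Exception {
        // Prints something like {"itemAllocationSize":4096,"baseOffset":0}
        System.out.println(new ObjectMapper().writeValueAsString(new BucketInfo()));
      }
    }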

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index b1394d6..4600fac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -31,6 +31,7 @@ import java.io.Serializable;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Comparator;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.PriorityQueue;
@@ -48,15 +49,17 @@ import java.util.concurrent.locks.ReentrantLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockPriority;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
 import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -254,7 +257,15 @@ public class BucketCache implements BlockCache, HeapSize {
     // Run the statistics thread periodically to print the cache statistics log
     this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
         statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS);
-    LOG.info("Started bucket cache");
+    LOG.info("Started bucket cache; ioengine=" + ioEngineName +
+        ", capacity=" + StringUtils.byteDesc(capacity) +
+      ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" +
+        writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" +
+      persistencePath + ", bucketAllocator=" + this.bucketAllocator);
+  }
+
+  public String getIoEngine() {
+    return ioEngine.toString();
   }
 
   /**
@@ -375,8 +386,9 @@ public class BucketCache implements BlockCache, HeapSize {
           if (lenRead != len) {
             throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
           }
-          Cacheable cachedBlock = bucketEntry.deserializerReference(
-              deserialiserMap).deserialize(bb, true);
+          CacheableDeserializer<Cacheable> deserializer =
+            bucketEntry.deserializerReference(this.deserialiserMap);
+          Cacheable cachedBlock = deserializer.deserialize(bb, true);
           long timeTaken = System.nanoTime() - start;
           if (updateCacheMetrics) {
             cacheStats.hit(caching);
@@ -898,7 +910,7 @@ public class BucketCache implements BlockCache, HeapSize {
     return cacheStats;
   }
 
-  BucketAllocator getAllocator() {
+  public BucketAllocator getAllocator() {
     return this.bucketAllocator;
   }
 
@@ -927,11 +939,6 @@ public class BucketCache implements BlockCache, HeapSize {
     return this.bucketAllocator.getUsedSize();
   }
 
-  @Override
-  public long getEvictedCount() {
-    return cacheStats.getEvictedCount();
-  }
-
   /**
    * Evicts all blocks for a specific HFile.
    * <p>
@@ -958,28 +965,6 @@ public class BucketCache implements BlockCache, HeapSize {
     return numEvicted;
   }
 
-
-  @Override
-  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
-      Configuration conf) {
-    throw new UnsupportedOperationException();
-  }
-
-  static enum BlockPriority {
-    /**
-     * Accessed a single time (used for scan-resistance)
-     */
-    SINGLE,
-    /**
-     * Accessed multiple times
-     */
-    MULTI,
-    /**
-     * Block from in-memory store
-     */
-    MEMORY
-  };
-
   /**
    * Item in cache. We expect this to be where most memory goes. Java uses 8
    * bytes just for object headers; after this, we want to use as little as
@@ -996,6 +981,10 @@ public class BucketCache implements BlockCache, HeapSize {
     byte deserialiserIndex;
     private volatile long accessTime;
     private BlockPriority priority;
+    /**
+     * Time this block was cached.  Presumes we are created just before we are added to the cache.
+     */
+    private final long cachedTime = System.nanoTime();
 
     BucketEntry(long offset, int length, long accessTime, boolean inMemory) {
       setOffset(offset);
@@ -1062,6 +1051,10 @@ public class BucketCache implements BlockCache, HeapSize {
     public boolean equals(Object that) {
       return this == that;
     }
+
+    public long getCachedTime() {
+      return cachedTime;
+    }
   }
 
   /**
@@ -1200,4 +1193,75 @@ public class BucketCache implements BlockCache, HeapSize {
     }
   }
 
+  @Override
+  public Iterator<CachedBlock> iterator() {
+    // Don't bother with ramcache since stuff is in here only a little while.
+    final Iterator<Map.Entry<BlockCacheKey, BucketEntry>> i =
+        this.backingMap.entrySet().iterator();
+    return new Iterator<CachedBlock>() {
+      private final long now = System.nanoTime();
+
+      @Override
+      public boolean hasNext() {
+        return i.hasNext();
+      }
+
+      @Override
+      public CachedBlock next() {
+        final Map.Entry<BlockCacheKey, BucketEntry> e = i.next();
+        return new CachedBlock() {
+          @Override
+          public String toString() {
+            return BlockCacheUtil.toString(this, now);
+          }
+
+          @Override
+          public BlockPriority getBlockPriority() {
+            return e.getValue().getPriority();
+          }
+
+          @Override
+          public BlockType getBlockType() {
+            // Not held by BucketEntry.  Could add it if wanted on BucketEntry creation.
+            return null;
+          }
+
+          @Override
+          public long getOffset() {
+            return e.getKey().getOffset();
+          }
+
+          @Override
+          public long getSize() {
+            return e.getValue().getLength();
+          }
+
+          @Override
+          public long getCachedTime() {
+            return e.getValue().getCachedTime();
+          }
+
+          @Override
+          public String getFilename() {
+            return e.getKey().getHfileName();
+          }
+
+          @Override
+          public int compareTo(CachedBlock other) {
+            return (int)(this.getOffset() - other.getOffset());
+          }
+        };
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
+  }
+
+  @Override
+  public BlockCache[] getBlockCaches() {
+    return null;
+  }
 }
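
With this change the block cache exposes its contents as CachedBlock views
through iterator(); the @Override annotations suggest the BlockCache interface
itself now extends Iterable<CachedBlock>, presumably in the other half of this
two-message commit. A sketch of a consumer (the CachePerFileReport class below
is hypothetical; names assumed, not from the patch):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CachedBlock;

    class CachePerFileReport {
      /** Tally cached bytes per HFile by walking the cache's new iterator. */
      static Map<String, Long> bytesPerFile(BlockCache cache) {
        Map<String, Long> sizes = new HashMap<String, Long>();
        for (CachedBlock block : cache) {
          Long prior = sizes.get(block.getFilename());
          sizes.put(block.getFilename(),
              (prior == null ? 0L : prior.longValue()) + block.getSize());
        }
        return sizes;
      }
    }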

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
index 37c579a..cffe905 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
@@ -34,6 +34,12 @@ public class BucketCacheStats extends CacheStats {
   private final static int nanoTime = 1000000;
   private long lastLogTime = EnvironmentEdgeManager.currentTimeMillis();
 
+  @Override
+  public String toString() {
+    return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() +
+      ", ioTimePerHit=" + getIOTimePerHit();
+  }
+
   public void ioHit(long time) {
     ioHitCount.incrementAndGet();
     ioHitTime.addAndGet(time);
@@ -43,7 +49,7 @@ public class BucketCacheStats extends CacheStats {
     long now = EnvironmentEdgeManager.currentTimeMillis();
     long took = (now - lastLogTime) / 1000;
     lastLogTime = now;
-    return ioHitCount.get() / took;
+    return took == 0? 0: ioHitCount.get() / took;
   }
 
   public double getIOTimePerHit() {
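
The took == 0 guard above matters because lastLogTime is refreshed on every
call: two reads of getIOHitsPerSecond() within the same wall-clock second
would otherwise throw ArithmeticException on the integer division. A minimal
sketch of the guarded computation (the RateDemo class is hypothetical):

    class RateDemo {
      /** Hits per second since the previous sample, tolerating sub-second gaps. */
      static long hitsPerSecond(long hits, long previousMillis, long nowMillis) {
        long took = (nowMillis - previousMillis) / 1000;
        return took == 0 ? 0 : hits / took;
      }
    }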

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
index 50f93be..833fecb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.util.ByteBufferArray;
 public class ByteBufferIOEngine implements IOEngine {
 
   private ByteBufferArray bufferArray;
+  private final long capacity;
+  private final boolean direct;
 
   /**
    * Construct the ByteBufferIOEngine with the given capacity
@@ -41,9 +43,17 @@ public class ByteBufferIOEngine implements IOEngine {
    */
   public ByteBufferIOEngine(long capacity, boolean direct)
       throws IOException {
+    this.capacity = capacity;
+    this.direct = direct;
     bufferArray = new ByteBufferArray(capacity, direct);
   }
 
+  @Override
+  public String toString() {
+    return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" +
+      String.format("%,d", this.capacity) + ", direct=" + this.direct;
+  }
+
   /**
    * Memory IO engine is always unable to support persistent storage for the
    * cache

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a1eea0b..0f28e1a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -36,8 +36,12 @@ public class FileIOEngine implements IOEngine {
   static final Log LOG = LogFactory.getLog(FileIOEngine.class);
 
   private FileChannel fileChannel = null;
+  private final String path;
+  private long size;
 
   public FileIOEngine(String filePath, long fileSize) throws IOException {
+    this.path = filePath;
+    this.size = fileSize;
     RandomAccessFile raf = null;
     try {
       raf = new RandomAccessFile(filePath, "rw");
@@ -56,6 +60,12 @@ public class FileIOEngine implements IOEngine {
     }
   }
 
+  @Override
+  public String toString() {
+    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path +
+      ", size=" + String.format("%,d", this.size);
+  }
+
   /**
    * File IO engine is always able to support persistent storage for the cache
    * @return true

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
index 4d209b3..fe59ef8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
@@ -19,21 +19,20 @@
 package org.apache.hadoop.hbase.io.hfile.slab;
 
 import java.nio.ByteBuffer;
-import java.util.List;
+import java.util.Iterator;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.util.StringUtils;
@@ -319,16 +318,6 @@ public class SingleSizeCache implements BlockCache, HeapSize {
     return 0;
   }
 
-  /*
-   * Not implemented. Extremely costly to do this from the off heap cache, you'd
-   * need to copy every object on heap once
-   */
-  @Override
-  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
-      Configuration conf) {
-    throw new UnsupportedOperationException();
-  }
-
   /* Just a pair class, holds a reference to the parent cacheable */
   private static class CacheablePair implements HeapSize {
     final CacheableDeserializer<Cacheable> deserializer;
@@ -353,4 +342,14 @@ public class SingleSizeCache implements BlockCache, HeapSize {
           + ClassSize.ATOMIC_LONG);
     }
   }
-}
+
+  @Override
+  public Iterator<CachedBlock> iterator() {
+    return null;
+  }
+
+  @Override
+  public BlockCache[] getBlockCaches() {
+    return null;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
index b0f9465..7699893 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
@@ -20,7 +20,8 @@
 package org.apache.hadoop.hbase.io.hfile.slab;
 
 import java.math.BigDecimal;
-import java.util.List;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
@@ -35,10 +36,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockPriority;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.util.StringUtils;
@@ -58,7 +62,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
 
   private final ConcurrentHashMap<BlockCacheKey, SingleSizeCache> backingStore;
-  private final TreeMap<Integer, SingleSizeCache> sizer;
+  private final TreeMap<Integer, SingleSizeCache> slabs;
   static final Log LOG = LogFactory.getLog(SlabCache.class);
   static final int STAT_THREAD_PERIOD_SECS = 60 * 5;
 
@@ -88,10 +92,13 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
     this.successfullyCachedStats = new SlabStats();
 
     backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
-    sizer = new TreeMap<Integer, SingleSizeCache>();
+    slabs = new TreeMap<Integer, SingleSizeCache>();
     this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
         STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
+  }
 
+  public Map<Integer, SingleSizeCache> getSizer() {
+    return slabs;
   }
 
   /**
@@ -168,7 +175,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
    *         object is too large, returns null.
    */
   Entry<Integer, SingleSizeCache> getHigherBlock(int size) {
-    return sizer.higherEntry(size - 1);
+    return slabs.higherEntry(size - 1);
   }
 
   private BigDecimal[] stringArrayToBigDecimalArray(String[] parsee) {
@@ -182,7 +189,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
   private void addSlab(int blockSize, int numBlocks) {
     LOG.info("Creating a slab of blockSize " + blockSize + " with " + numBlocks
         + " blocks, " + StringUtils.humanReadableInt(blockSize * (long) numBlocks) + "bytes.");
-    sizer.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
+    slabs.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
   }
 
   /**
@@ -283,7 +290,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
    * Also terminates the scheduleThreadPool.
    */
   public void shutdown() {
-    for (SingleSizeCache s : sizer.values()) {
+    for (SingleSizeCache s : slabs.values()) {
       s.shutdown();
     }
     this.scheduleThreadPool.shutdown();
@@ -291,7 +298,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
 
   public long heapSize() {
     long childCacheSize = 0;
-    for (SingleSizeCache s : sizer.values()) {
+    for (SingleSizeCache s : slabs.values()) {
       childCacheSize += s.heapSize();
     }
     return SlabCache.CACHE_FIXED_OVERHEAD + childCacheSize;
@@ -303,7 +310,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
 
   public long getFreeSize() {
     long childFreeSize = 0;
-    for (SingleSizeCache s : sizer.values()) {
+    for (SingleSizeCache s : slabs.values()) {
       childFreeSize += s.getFreeSize();
     }
     return childFreeSize;
@@ -312,7 +319,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
   @Override
   public long getBlockCount() {
     long count = 0;
-    for (SingleSizeCache cache : sizer.values()) {
+    for (SingleSizeCache cache : slabs.values()) {
       count += cache.getBlockCount();
     }
     return count;
@@ -340,7 +347,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
 
     @Override
     public void run() {
-      for (SingleSizeCache s : ourcache.sizer.values()) {
+      for (SingleSizeCache s : ourcache.slabs.values()) {
         s.logStats();
       }
 
@@ -418,14 +425,75 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
     return numEvicted;
   }
 
-  /*
-   * Not implemented. Extremely costly to do this from the off heap cache, you'd
-   * need to copy every object on heap once
-   */
   @Override
-  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
-      Configuration conf) {
-    throw new UnsupportedOperationException();
+  public Iterator<CachedBlock> iterator() {
+    // Walk the backingStore of cached blocks directly.
+    final Iterator<Map.Entry<BlockCacheKey, SingleSizeCache>> i =
+        this.backingStore.entrySet().iterator();
+    return new Iterator<CachedBlock>() {
+      private final long now = System.nanoTime();
+
+      @Override
+      public boolean hasNext() {
+        return i.hasNext();
+      }
+
+      @Override
+      public CachedBlock next() {
+        final Map.Entry<BlockCacheKey, SingleSizeCache> e = i.next();
+        final Cacheable cacheable = e.getValue().getBlock(e.getKey(), false, false, false);
+        return new CachedBlock() {
+          @Override
+          public String toString() {
+            return BlockCacheUtil.toString(this, now);
+          }
+
+          @Override
+          public BlockPriority getBlockPriority() {
+            return null;
+          }
+
+          @Override
+          public BlockType getBlockType() {
+            return cacheable.getBlockType();
+          }
+
+          @Override
+          public long getOffset() {
+            return e.getKey().getOffset();
+          }
+
+          @Override
+          public long getSize() {
+            return cacheable == null? 0: cacheable.getSerializedLength();
+          }
+
+          @Override
+          public long getCachedTime() {
+            return -1;
+          }
+
+          @Override
+          public String getFilename() {
+            return e.getKey().getHfileName();
+          }
+
+          @Override
+          public int compareTo(CachedBlock other) {
+            return (int)(this.getOffset() - other.getOffset());
+          }
+        };
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException();
+      }
+    };
   }
 
+  @Override
+  public BlockCache[] getBlockCaches() {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
index c62e768..6e869e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
@@ -60,4 +60,4 @@ public abstract class StateDumpServlet extends HttpServlet {
       status.dumpTo(out, "  ");
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 1f3736d..d7f95d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -4628,4 +4628,11 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
     respBuilder.setResponse(openInfoList.size());
     return respBuilder.build();
   }
+
+  /**
+   * @return The cache config instance used by the regionserver.
+   */
+  public CacheConfig getCacheConfig() {
+    return this.cacheConfig;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
index 64ae859..cae705c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
@@ -39,20 +39,24 @@ public class RSStatusServlet extends HttpServlet {
     HRegionServer hrs = (HRegionServer)getServletContext().getAttribute(
         HRegionServer.REGIONSERVER);
     assert hrs != null : "No RS in context!";
-    
+
     resp.setContentType("text/html");
-    
+
     if (!hrs.isOnline()) {
       resp.getWriter().write("The RegionServer is initializing!");
       resp.getWriter().close();
       return;
     }
-    
+
     RSStatusTmpl tmpl = new RSStatusTmpl();
     if (req.getParameter("format") != null)
       tmpl.setFormat(req.getParameter("format"));
     if (req.getParameter("filter") != null)
       tmpl.setFilter(req.getParameter("filter"));
+    if (req.getParameter("bcn") != null)
+      tmpl.setBcn(req.getParameter("bcn"));
+    if (req.getParameter("bcv") != null)
+      tmpl.setBcv(req.getParameter("bcv"));
     tmpl.render(resp.getWriter(), hrs);
   }
 }
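
The two new request parameters are passed straight through to RSStatusTmpl,
which the block cache report UI presumably uses to select a cache ("bcn") and
a view of it ("bcv"); the template changes themselves are in the other half of
this commit. An illustrative request against a 0.98-era region server info
port (parameter values here are assumptions, not from the patch):

    http://regionserver.example.com:60030/rs-status?format=json&bcn=LruBlockCache&bcv=file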

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index be9c750..31fab53 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -92,9 +92,9 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/logs/">Local logs</a></li>
+                <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
-                <li><a href="/dump">Debug dump</a></li>
+                <li><a href="/dump">Debug Dump</a></li>
                 <li><a href="/jmx">Metrics Dump</a></li>
                 <% if (HBaseConfiguration.isShowConfInServlet()) { %>
                 <li><a href="/conf">HBase Configuration</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 8df53cb..1f579e6 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -94,9 +94,9 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/logs/">Local logs</a></li>
+                <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
-                <li><a href="/dump">Debug dump</a></li>
+                <li><a href="/dump">Debug Dump</a></li>
                 <li><a href="/jmx">Metrics Dump</a></li>
                 <% if (HBaseConfiguration.isShowConfInServlet()) { %>
                 <li><a href="/conf">HBase Configuration</a></li>
@@ -170,9 +170,9 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/logs/">Local logs</a></li>
+                <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
-                <li><a href="/dump">Debug dump</a></li>
+                <li><a href="/dump">Debug Dump</a></li>
                 <li><a href="/jmx">Metrics Dump</a></li>
             </ul>
         </div><!--/.nav-collapse -->

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
index e586322..9bed70b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
@@ -62,9 +62,9 @@
               <ul class="nav navbar-nav">
                   <li class="active"><a href="/master-status">Home</a></li>
                   <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                  <li><a href="/logs/">Local logs</a></li>
+                  <li><a href="/logs/">Local Logs</a></li>
                   <li><a href="/logLevel">Log Level</a></li>
-                  <li><a href="/dump">Debug dump</a></li>
+                  <li><a href="/dump">Debug Dump</a></li>
                   <li><a href="/jmx">Metrics Dump</a></li>
                   <% if (HBaseConfiguration.isShowConfInServlet()) { %>
                   <li><a href="/conf">HBase Configuration</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
index 0c6d88e..72dd000 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
@@ -59,9 +59,9 @@
                     <ul class="nav navbar-nav">
                         <li><a href="/master-status">Home</a></li>
                         <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                        <li><a href="/logs/">Local logs</a></li>
+                        <li><a href="/logs/">Local Logs</a></li>
                         <li><a href="/logLevel">Log Level</a></li>
-                        <li><a href="/dump">Debug dump</a></li>
+                        <li><a href="/dump">Debug Dump</a></li>
                         <li><a href="/jmx">Metrics Dump</a></li>
                         <% if (HBaseConfiguration.isShowConfInServlet()) { %>
                         <li><a href="/conf">HBase Configuration</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 5f8458a..c4e0503 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.LruCachedBlock;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -279,8 +279,8 @@ public class TestHeapSize  {
     // CachedBlock Fixed Overhead
     // We really need "deep" sizing but ClassSize does not do this.
     // Perhaps we should do all these more in this style....
-    cl = CachedBlock.class;
-    actual = CachedBlock.PER_BLOCK_OVERHEAD;
+    cl = LruCachedBlock.class;
+    actual = LruCachedBlock.PER_BLOCK_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
     expected += ClassSize.estimateBase(String.class, false);
     expected += ClassSize.estimateBase(ByteBuffer.class, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
deleted file mode 100644
index 49d810c..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Random seek test.
- */
-public class RandomSeek {
-  private static List<String> slurp(String fname) throws IOException {
-    BufferedReader istream = new BufferedReader(new FileReader(fname));
-    String str;
-    List<String> l = new ArrayList<String>();
-    while ( (str=istream.readLine()) != null) {
-      String [] parts = str.split(",");
-      l.add(parts[0] + ":" + parts[1] + ":" + parts[2]);
-    }
-    istream.close();
-    return l;
-  }
-
-  private static String randKey(List<String> keys) {
-    Random r = new Random();
-    //return keys.get(r.nextInt(keys.size()));
-    return "2" + Integer.toString(7+r.nextInt(2)) + Integer.toString(r.nextInt(100));
-    //return new String(r.nextInt(100));
-  }
-
-  public static void main(String [] argv) throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("io.file.buffer.size", 64*1024);
-    RawLocalFileSystem rlfs = new RawLocalFileSystem();
-    rlfs.setConf(conf);
-    LocalFileSystem lfs = new LocalFileSystem(rlfs);
-
-    Path path = new Path("/Users/ryan/rfile.big.txt");
-    long start = System.currentTimeMillis();
-    SimpleBlockCache cache = new SimpleBlockCache();
-    CacheConfig cacheConf = new CacheConfig(cache, true, false, false, false,
-        false, false, false, true);
-
-    Reader reader = HFile.createReader(lfs, path, cacheConf, conf);
-    reader.loadFileInfo();
-    System.out.println(reader.getTrailer());
-    long end = System.currentTimeMillis();
-
-    System.out.println("Index read time: " + (end - start));
-
-    List<String> keys = slurp("/Users/ryan/xaa.50k");
-
-    // Get a scanner that doesn't cache and that uses pread.
-    HFileScanner scanner = reader.getScanner(false, true);
-    int count;
-    long totalBytes = 0;
-    int notFound = 0;
-
-    start = System.nanoTime();
-    for(count = 0; count < 500000; ++count) {
-      String key = randKey(keys);
-      byte [] bkey = Bytes.toBytes(key);
-      int res = scanner.seekTo(bkey);
-      if (res == 0) {
-        ByteBuffer k = scanner.getKey();
-        ByteBuffer v = scanner.getValue();
-        totalBytes += k.limit();
-        totalBytes += v.limit();
-      } else {
-        ++ notFound;
-      }
-      if (res == -1) {
-        scanner.seekTo();
-      }
-      // Scan for another 1000 rows.
-      for (int i = 0; i < 1000; ++i) {
-        if (!scanner.next())
-          break;
-        ByteBuffer k = scanner.getKey();
-        ByteBuffer v = scanner.getValue();
-        totalBytes += k.limit();
-        totalBytes += v.limit();
-      }
-
-      if ( count % 1000 == 0 ) {
-        end = System.nanoTime();
-
-            System.out.println("Cache block count: " + cache.size() + " dumped: "+ cache.dumps);
-            //System.out.println("Cache size: " + cache.heapSize());
-            double msTime = ((end - start) / 1000000.0);
-            System.out.println("Seeked: "+ count + " in " + msTime + " (ms) "
-                + (1000.0 / msTime ) + " seeks/ms "
-                + (msTime / 1000.0) + " ms/seek");
-
-            start = System.nanoTime();
-      }
-    }
-    System.out.println("Total bytes: " + totalBytes + " not found: " + notFound);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
deleted file mode 100644
index a45dad0..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the BlockCacheColumnFamilySummary class
- * 
- */
-@Category(SmallTests.class)
-public class TestBlockCacheColumnFamilySummary {
-
-
-  /**
-   * 
-   */
-  @Test
-  public void testEquals()  {
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary();
-    e1.setTable("table1");
-    e1.setColumnFamily("cf1");
-    
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary();
-    e2.setTable("table1");
-    e2.setColumnFamily("cf1");
-
-    assertEquals("bcse", e1, e2);
-  }
-
-  /**
-   * 
-   */
-  @Test
-  public void testNotEquals() {
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary();
-    e1.setTable("table1");
-    e1.setColumnFamily("cf1");
-    
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary();
-    e2.setTable("tablexxxxxx");
-    e2.setColumnFamily("cf1");
-
-    assertTrue("bcse", ! e1.equals(e2));
-  }
-
-  /**
-   * 
-   */
-  @Test
-  public void testMapLookup() {
-    
-    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs = 
-      new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary("table1","cf1");
-
-    BlockCacheColumnFamilySummary lookup = bcs.get(e1);
-
-    if (lookup == null) {
-      lookup = BlockCacheColumnFamilySummary.create(e1);
-      bcs.put(e1,lookup);
-      lookup.incrementBlocks();
-      lookup.incrementHeapSize(100L);
-    }
-
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary("table1","cf1");
-
-    BlockCacheColumnFamilySummary l2 = bcs.get(e2);
-    assertEquals("blocks",1,l2.getBlocks());
-    assertEquals("heap",100L,l2.getHeapSize());
-  }
-
-  /**
-   * 
-   */
-  @Test
-  public void testMapEntry() {
-    
-    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs = 
-      new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e1, e1);
-    
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e2, e2);
-    
-    BlockCacheColumnFamilySummary e3 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e3, e3);
-    
-    assertEquals("mapSize",1,bcs.size());
-  }
-
-
-}
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
index d7f9cbb..1bec7f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
@@ -40,7 +40,7 @@ public class TestCachedBlockQueue extends TestCase {
     CachedBlock cb9 = new CachedBlock(1000, "cb9", 9);
     CachedBlock cb10 = new CachedBlock(1500, "cb10", 10);
 
-    CachedBlockQueue queue = new CachedBlockQueue(10000,1000);
+    LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000);
 
     queue.add(cb1);
     queue.add(cb2);
@@ -78,7 +78,7 @@ public class TestCachedBlockQueue extends TestCase {
     CachedBlock cb9 = new CachedBlock(1000, "cb9", 9);
     CachedBlock cb10 = new CachedBlock(1500, "cb10", 10);
 
-    CachedBlockQueue queue = new CachedBlockQueue(10000,1000);
+    LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000);
 
     queue.add(cb1);
     queue.add(cb2);
@@ -110,7 +110,7 @@ public class TestCachedBlockQueue extends TestCase {
     }
   }
 
-  private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.CachedBlock
+  private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.LruCachedBlock
   {
     public CachedBlock(final long heapSize, String name, long accessTime) {
       super(new BlockCacheKey(name, 0),

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index fc7bcf6..a7d13577 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -67,7 +67,7 @@ public class TestLruBlockCache {
 
     // wait until at least one eviction has run
     int n = 0;
-    while(cache.getEvictionCount() == 0) {
+    while(cache.getStats().getEvictionCount() == 0) {
       Thread.sleep(200);
       assertTrue("Eviction never happened.", n++ < 20);
     }
@@ -87,7 +87,7 @@ public class TestLruBlockCache {
       assertTrue("Cache never stabilized.", n++ < 20);
     }
 
-    long evictionCount = cache.getEvictionCount();
+    long evictionCount = cache.getStats().getEvictionCount();
     assertTrue(evictionCount >= 1);
     System.out.println("Background Evictions run: " + evictionCount);
   }
@@ -136,7 +136,7 @@ public class TestLruBlockCache {
     }
 
     // Expect no evictions
-    assertEquals(0, cache.getEvictionCount());
+    assertEquals(0, cache.getStats().getEvictionCount());
     Thread t = new LruBlockCache.StatisticsThread(cache);
     t.start();
     t.join();
@@ -161,7 +161,7 @@ public class TestLruBlockCache {
     }
 
     // A single eviction run should have occurred
-    assertEquals(1, cache.getEvictionCount());
+    assertEquals(1, cache.getStats().getEvictionCount());
 
     // Our expected size overruns acceptable limit
     assertTrue(expectedCacheSize >
@@ -209,10 +209,10 @@ public class TestLruBlockCache {
     }
 
     // A single eviction run should have occurred
-    assertEquals(cache.getEvictionCount(), 1);
+    assertEquals(cache.getStats().getEvictionCount(), 1);
 
     // We expect two entries evicted
-    assertEquals(cache.getEvictedCount(), 2);
+    assertEquals(cache.getStats().getEvictedCount(), 2);
 
     // Our expected size overruns acceptable limit
     assertTrue(expectedCacheSize >
@@ -283,7 +283,7 @@ public class TestLruBlockCache {
     }
 
     // Do not expect any evictions yet
-    assertEquals(0, cache.getEvictionCount());
+    assertEquals(0, cache.getStats().getEvictionCount());
 
     // Verify cache size
     assertEquals(expectedCacheSize, cache.heapSize());
@@ -292,8 +292,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(singleBlocks[3].cacheKey, singleBlocks[3]);
 
     // Single eviction, one thing evicted
-    assertEquals(1, cache.getEvictionCount());
-    assertEquals(1, cache.getEvictedCount());
+    assertEquals(1, cache.getStats().getEvictionCount());
+    assertEquals(1, cache.getStats().getEvictedCount());
 
     // Verify oldest single block is the one evicted
     assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
@@ -305,8 +305,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
 
     // Two evictions, two evicted.
-    assertEquals(2, cache.getEvictionCount());
-    assertEquals(2, cache.getEvictedCount());
+    assertEquals(2, cache.getStats().getEvictionCount());
+    assertEquals(2, cache.getStats().getEvictedCount());
 
     // Oldest multi block should be evicted now
     assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true));
@@ -315,8 +315,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true);
 
     // Three evictions, three evicted.
-    assertEquals(3, cache.getEvictionCount());
-    assertEquals(3, cache.getEvictedCount());
+    assertEquals(3, cache.getStats().getEvictionCount());
+    assertEquals(3, cache.getStats().getEvictedCount());
 
     // Oldest memory block should be evicted now
     assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true));
@@ -326,8 +326,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]);
 
     // Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
-    assertEquals(4, cache.getEvictionCount());
-    assertEquals(6, cache.getEvictedCount());
+    assertEquals(4, cache.getStats().getEvictionCount());
+    assertEquals(6, cache.getStats().getEvictedCount());
 
     // Expect three remaining singles to be evicted
     assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true));
@@ -341,8 +341,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(bigBlocks[1].cacheKey, bigBlocks[1]);
 
     // Five evictions, nine evicted (3 new)
-    assertEquals(5, cache.getEvictionCount());
-    assertEquals(9, cache.getEvictedCount());
+    assertEquals(5, cache.getStats().getEvictionCount());
+    assertEquals(9, cache.getStats().getEvictedCount());
 
     // Expect three remaining multis to be evicted
     assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
@@ -353,8 +353,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true);
 
     // Six evictions, twelve evicted (3 new)
-    assertEquals(6, cache.getEvictionCount());
-    assertEquals(12, cache.getEvictedCount());
+    assertEquals(6, cache.getStats().getEvictionCount());
+    assertEquals(12, cache.getStats().getEvictedCount());
 
     // Expect three remaining in-memory to be evicted
     assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false, true));
@@ -398,23 +398,23 @@ public class TestLruBlockCache {
     cache.cacheBlock(singleBlocks[4].cacheKey, singleBlocks[4]);
     expectedCacheSize += singleBlocks[4].cacheBlockHeapSize();
     // Do not expect any evictions yet
-    assertEquals(0, cache.getEvictionCount());
+    assertEquals(0, cache.getStats().getEvictionCount());
     // Verify cache size
     assertEquals(expectedCacheSize, cache.heapSize());
 
     // 1. Insert a memory block, oldest single should be evicted, si:mu:me = 4:4:1
     cache.cacheBlock(memoryBlocks[0].cacheKey, memoryBlocks[0], true);
     // Single eviction, one block evicted
-    assertEquals(1, cache.getEvictionCount());
-    assertEquals(1, cache.getEvictedCount());
+    assertEquals(1, cache.getStats().getEvictionCount());
+    assertEquals(1, cache.getStats().getEvictedCount());
     // Verify oldest single block (index = 0) is the one evicted
     assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
 
     // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2
     cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true);
     // Two evictions, two evicted.
-    assertEquals(2, cache.getEvictionCount());
-    assertEquals(2, cache.getEvictedCount());
+    assertEquals(2, cache.getStats().getEvictionCount());
+    assertEquals(2, cache.getStats().getEvictedCount());
     // Current oldest single block (index = 1) should be evicted now
     assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true));
 
@@ -424,8 +424,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(memoryBlocks[4].cacheKey, memoryBlocks[4], true);
     cache.cacheBlock(memoryBlocks[5].cacheKey, memoryBlocks[5], true);
     // Three evictions, three evicted.
-    assertEquals(6, cache.getEvictionCount());
-    assertEquals(6, cache.getEvictedCount());
+    assertEquals(6, cache.getStats().getEvictionCount());
+    assertEquals(6, cache.getStats().getEvictedCount());
     // two oldest single blocks and two oldest multi blocks evicted
     assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true));
     assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true));
@@ -438,8 +438,8 @@ public class TestLruBlockCache {
     cache.cacheBlock(memoryBlocks[7].cacheKey, memoryBlocks[7], true);
     cache.cacheBlock(memoryBlocks[8].cacheKey, memoryBlocks[8], true);
     // Three evictions, three evicted.
-    assertEquals(9, cache.getEvictionCount());
-    assertEquals(9, cache.getEvictedCount());
+    assertEquals(9, cache.getStats().getEvictionCount());
+    assertEquals(9, cache.getStats().getEvictedCount());
     // one oldest single block and two oldest multi blocks evicted
     assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true));
     assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true));
@@ -449,8 +449,8 @@ public class TestLruBlockCache {
     // si:mu:me = 0:0:9
     cache.cacheBlock(memoryBlocks[9].cacheKey, memoryBlocks[9], true);
     // one eviction, one evicted.
-    assertEquals(10, cache.getEvictionCount());
-    assertEquals(10, cache.getEvictedCount());
+    assertEquals(10, cache.getStats().getEvictionCount());
+    assertEquals(10, cache.getStats().getEvictedCount());
     // oldest memory block evicted
     assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true));
 
@@ -459,8 +459,8 @@ public class TestLruBlockCache {
     // si:mu:me = 0:0:9 (no change)
     cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]);
     // one eviction, one evicted.
-    assertEquals(11, cache.getEvictionCount());
-    assertEquals(11, cache.getEvictedCount());
+    assertEquals(11, cache.getStats().getEvictionCount());
+    assertEquals(11, cache.getStats().getEvictedCount());
     // the single block just cached now evicted (can't evict memory)
     assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, true));
   }
@@ -498,10 +498,10 @@ public class TestLruBlockCache {
     }
 
     // An eviction ran
-    assertEquals(1, cache.getEvictionCount());
+    assertEquals(1, cache.getStats().getEvictionCount());
 
     // To drop down to 2/3 capacity, we'll need to evict 4 blocks
-    assertEquals(4, cache.getEvictedCount());
+    assertEquals(4, cache.getStats().getEvictedCount());
 
     // Should have been taken off equally from single and multi
     assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true));
@@ -521,11 +521,11 @@ public class TestLruBlockCache {
     }
 
     // 4 total evictions, 16 total evicted
-    assertEquals(4, cache.getEvictionCount());
-    assertEquals(16, cache.getEvictedCount());
+    assertEquals(4, cache.getStats().getEvictionCount());
+    assertEquals(16, cache.getStats().getEvictedCount());
 
     // Should now have 7 total blocks
-    assertEquals(7, cache.size());
+    assertEquals(7, cache.getBlockCount());
 
   }
 
@@ -566,16 +566,16 @@ public class TestLruBlockCache {
     }
 
     // Do not expect any evictions yet
-    assertEquals(0, cache.getEvictionCount());
+    assertEquals(0, cache.getStats().getEvictionCount());
 
     // Resize to half capacity plus an extra block (otherwise we evict an extra)
     cache.setMaxSize((long)(maxSize * 0.5f));
 
     // Should have run a single eviction
-    assertEquals(1, cache.getEvictionCount());
+    assertEquals(1, cache.getStats().getEvictionCount());
 
     // And we expect 1/2 of the blocks to be evicted
-    assertEquals(15, cache.getEvictedCount());
+    assertEquals(15, cache.getStats().getEvictedCount());
 
     // And the oldest 5 blocks from each category should be gone
     for(int i=0;i<5;i++) {
@@ -711,7 +711,7 @@ public class TestLruBlockCache {
         (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
         (LruBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
     long negateBlockSize = (long)(totalOverhead/numEntries);
-    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
+    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*0.99f));
   }
 
@@ -723,7 +723,7 @@ public class TestLruBlockCache {
         (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
         (LruBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
     long negateBlockSize = totalOverhead / numEntries;
-    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
+    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*
         LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
   }
@@ -745,7 +745,7 @@ public class TestLruBlockCache {
 
     /** Size of the cache block holding this item. Used for verification. */
     public long cacheBlockHeapSize() {
-      return CachedBlock.PER_BLOCK_OVERHEAD
+      return LruCachedBlock.PER_BLOCK_OVERHEAD
           + ClassSize.align(cacheKey.heapSize())
           + ClassSize.align(size);
     }

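The bulk of the changes above swap direct counter accessors on LruBlockCache for reads through its CacheStats object, and replace cache.size() with cache.getBlockCount(). A minimal sketch of the new idiom follows; the cache and block sizes are illustrative, and the two-argument constructor is an assumption carried over from the rest of this test rather than something shown in this hunk.

    import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

    public class EvictionStatsSketch {
      public static void main(String[] args) {
        // Sizes here are illustrative only.
        long maxSize = 1024 * 1024;   // 1 MB cache
        long blockSize = 16 * 1024;   // 16 KB average block
        LruBlockCache cache = new LruBlockCache(maxSize, blockSize);

        // Counters that used to hang off the cache now live on CacheStats:
        long evictionRuns  = cache.getStats().getEvictionCount(); // eviction passes run
        long blocksEvicted = cache.getStats().getEvictedCount();  // blocks actually evicted
        long blocksCached  = cache.getBlockCount();               // replaces cache.size()

        System.out.println(evictionRuns + " runs, " + blocksEvicted
            + " evicted, " + blocksCached + " resident");
      }
    }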
http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
deleted file mode 100644
index 59dab69..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableUtil;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the block cache summary functionality in StoreFile, 
- * which contains the BlockCache
- *
- */
-@Category(MediumTests.class)
-public class TestStoreFileBlockCacheSummary {
-  final Log LOG = LogFactory.getLog(getClass());
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();  
-  private static final String TEST_TABLE = "testTable";
-  private static final String TEST_TABLE2 = "testTable2";
-  private static final String TEST_CF = "testFamily";
-  private static byte [] FAMILY = Bytes.toBytes(TEST_CF);
-  private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
-  private static byte [] VALUE = Bytes.toBytes("testValue");
-
-  private final int TOTAL_ROWS = 4;
-  
-  /**
-   * @throws java.lang.Exception exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniCluster();
-  }
-
-  /**
-   * @throws java.lang.Exception exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-  
-
-  private Put createPut(byte[] family, String row) {
-    Put put = new Put( Bytes.toBytes(row));
-    put.add(family, QUALIFIER, VALUE);
-    return put;
-  }
-  
-  /**
-  * This test inserts data into multiple tables and then reads both tables to ensure
-  * they are in the block cache.
-  *
-  * @throws Exception exception
-  */
- @Test
- public void testBlockCacheSummary() throws Exception {
-   HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
-   addRows(ht, FAMILY);
-
-   HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
-   addRows(ht2, FAMILY);
-
-   TEST_UTIL.flush();
-   
-   scan(ht, FAMILY);
-   scan(ht2, FAMILY);
-      
-   BlockCache bc =
-     new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
-   List<BlockCacheColumnFamilySummary> bcs = 
-     bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
-   LOG.info("blockCacheSummary: " + bcs);
-
-   assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);
-
-   BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
-   assertEquals("table", TEST_TABLE, e.getTable());
-   assertEquals("cf", TEST_CF, e.getColumnFamily());
-
-   e = bcs.get(bcs.size()-1);
-   assertEquals("table", TEST_TABLE2, e.getTable());
-   assertEquals("cf", TEST_CF, e.getColumnFamily());
-
-   ht.close();
-   ht2.close();
- }
-
- private void addRows(HTable ht, byte[] family) throws IOException {
- 
-   List<Row> rows = new ArrayList<Row>();
-   for (int i = 0; i < TOTAL_ROWS;i++) {
-     rows.add(createPut(family, "row" + i));
-   }
-   
-   HTableUtil.bucketRsBatch( ht, rows);
- }
-
- private void scan(HTable ht, byte[] family) throws IOException {
-   Scan scan = new Scan();
-   scan.addColumn(family, QUALIFIER);
-   
-   int count = 0;
-   for(@SuppressWarnings("unused") Result result : ht.getScanner(scan)) {
-     count++;
-   }
-   if (TOTAL_ROWS != count) {
-     throw new IOException("Incorrect number of rows!");
-   }
- }
-
-}
-

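TestStoreFileBlockCacheSummary exercised the getBlockCacheColumnFamilySummaries() API that this patch removes. Its replacement is the block cache contents report wired up in the first part of this commit; under the upstream HBASE-4089 design the cache is inspected by iterating it, with aggregation left to the caller. The following is a hedged sketch only, assuming the Iterable<CachedBlock> interface and accessor names as they landed upstream; they are not shown in this hunk.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CachedBlock;

    public final class CacheSummarySketch {
      /** Aggregate cached bytes per HFile by walking the cache. */
      static Map<String, Long> bytesByFile(BlockCache cache) {
        Map<String, Long> sizes = new HashMap<String, Long>();
        for (CachedBlock block : cache) {  // assumes BlockCache is Iterable<CachedBlock>
          Long prior = sizes.get(block.getFilename());
          sizes.put(block.getFilename(),
              (prior == null ? 0L : prior.longValue()) + block.getSize());
        }
        return sizes;
      }
    }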
http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
index d0868d6..fa3dda6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
 import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/hbase/blob/da94cd39/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 768c6943..c769c44 100644
--- a/pom.xml
+++ b/pom.xml
@@ -892,7 +892,7 @@
     <commons-math.version>2.1</commons-math.version>
     <collections.version>3.2.1</collections.version>
     <httpclient.version>3.1</httpclient.version>
-    <metrics-core.version>2.1.2</metrics-core.version>
+    <metrics-core.version>2.2.0</metrics-core.version>
     <guava.version>12.0.1</guava.version>
     <jackson.version>1.8.8</jackson.version>
     <jasper.version>5.5.23</jasper.version>

