hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-15366 Add doc, trace-level logging, and test around hfileblock. No change in operation, just adding doc and some helpful logging
Date Thu, 21 Apr 2016 17:56:10 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1e6d7e811 -> 0b65f2002


HBASE-15366 Add doc, trace-level logging, and test around hfileblock. No change in operation, just adding doc and some helpful logging

M hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
 Make it emit its toString in a format that matches the way we log
 elsewhere
M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
 Capitalize statics.
M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
 Verify and clean up the documentation of the hfileblock format at the head of the class.
 Explain what 'EXTRA_SERIALIZATION_SPACE' is all about.
 Connect how we serialize and deserialize; it is done in different places,
 one way when pulling from HDFS and another when pulling from cache
 (TO BE FIXED). Shut down a load of public access.
M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 Add trace-level logging

Signed-off-by: stack <stack@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b65f200
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b65f200
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b65f200

Branch: refs/heads/branch-1
Commit: 0b65f2002bbdabd699ce12598b1179d1f2d7f1c7
Parents: 1e6d7e8
Author: stack <stack@apache.org>
Authored: Wed Apr 20 23:10:04 2016 -0700
Committer: stack <stack@apache.org>
Committed: Thu Apr 21 10:56:07 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |   4 +-
 .../io/encoding/BufferedDataBlockEncoder.java   |   8 +-
 .../hbase/io/encoding/DataBlockEncoder.java     |   3 +-
 .../hadoop/hbase/io/hfile/CacheConfig.java      |  10 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  11 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java       | 351 +++++++++++--------
 .../hadoop/hbase/io/hfile/HFileReaderV2.java    |   4 +-
 .../hbase/io/hfile/bucket/BucketAllocator.java  |  17 +-
 .../hbase/io/hfile/bucket/BucketCache.java      |  35 +-
 .../hadoop/hbase/regionserver/StoreFile.java    |   1 -
 .../TestFilterListOrOperatorWithBlkCnt.java     |   2 +-
 .../io/encoding/TestDataBlockEncoders.java      |  25 +-
 .../encoding/TestSeekToBlockWithEncoders.java   |   4 +-
 .../io/hfile/TestForceCacheImportantBlocks.java |   6 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java   |   2 +-
 .../io/hfile/TestHFileBlockCompatibility.java   |   3 +-
 .../hbase/regionserver/TestBlocksRead.java      |   2 +-
 17 files changed, 284 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 5f20183..7a77c7e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -64,7 +64,9 @@ public final class HConstants {
   public static final byte[] RPC_HEADER = new byte[] { 'H', 'B', 'a', 's' };
   public static final byte RPC_CURRENT_VERSION = 0;
 
-  // HFileBlock constants.
+  // HFileBlock constants. TODO!!!! THESE DEFINES BELONG IN HFILEBLOCK, NOT UP HERE.
+  // Needed down in hbase-common by the encoders, though these encoders should not be dealing
+  // in the internals of hfileblocks. Fix encapsulation.
 
  /** The size of the header data structures when the minor version is 0 */
   public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT

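For orientation, a small sketch, not part of the patch, of how the two header-size constants decompose; the field widths are taken from the block-format javadoc added to HFileBlock below, and MAGIC_LENGTH is assumed to be the 8-byte block magic (e.g. DATABLK*):

    // Hypothetical illustration; mirrors HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM / HFILEBLOCK_HEADER_SIZE.
    public class HeaderSizeSketch {
      public static void main(String[] args) {
        int magicLength = 8;                       // block type magic, e.g. "DATABLK*"
        int sizeofByte = 1, sizeofInt = 4, sizeofLong = 8;
        // magic + onDiskSizeWithoutHeader (int) + uncompressedSizeWithoutHeader (int)
        // + prevBlockOffset (long)
        int headerNoChecksum = magicLength + 2 * sizeofInt + sizeofLong;          // 24
        // minorVersion >= 1 adds checksum type (byte) + bytesPerChecksum (int)
        // + onDiskDataSizeWithHeader (int)
        int headerWithChecksum = headerNoChecksum + sizeofByte + 2 * sizeofInt;   // 33
        System.out.println(headerNoChecksum + "/" + headerWithChecksum);          // 24/33
      }
    }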
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 74e0567..d808f88 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -47,7 +47,9 @@ import org.apache.hadoop.io.WritableUtils;
  */
 @InterfaceAudience.Private
 abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
-
+  /**
+   * TODO: This datablockencoder is dealing in internals of hfileblocks. Purge reference to HFBs
+   */
   private static int INITIAL_KEY_BUFFER_SIZE = 512;
 
   @Override
@@ -967,8 +969,8 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder {
     BufferedDataBlockEncodingState state = (BufferedDataBlockEncodingState) encodingCtx
         .getEncodingState();
     // Write the unencodedDataSizeWritten (with header size)
-    Bytes.putInt(uncompressedBytesWithHeader, HConstants.HFILEBLOCK_HEADER_SIZE
-        + DataBlockEncoding.ID_SIZE, state.unencodedDataSizeWritten
+    Bytes.putInt(uncompressedBytesWithHeader,
+      HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE, state.unencodedDataSizeWritten
         );
     if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
       encodingCtx.postEncoding(BlockType.ENCODED_DATA);

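To make the offset in that Bytes.putInt call concrete: for an ENCODED_DATA block the uncompressed byte array starts with the block header, then the DataBlockEncoding id, and only then the unencoded-size int being written. A sketch under stated assumptions (header size 33 with hbase checksums, a two-byte encoding id, big-endian int as Bytes.putInt writes it):

    // Hypothetical sketch of the prefix layout of uncompressedBytesWithHeader.
    public class EncodedBlockOffsetSketch {
      public static void main(String[] args) {
        final int HEADER_SIZE = 33;      // assumed HFILEBLOCK_HEADER_SIZE with checksums
        final int ID_SIZE = 2;           // assumed: DataBlockEncoding id is a short
        byte[] uncompressedBytesWithHeader = new byte[64];
        int unencodedDataSizeWritten = 1234;
        int pos = HEADER_SIZE + ID_SIZE; // [0..32] header, [33..34] encoding id, [35..38] size
        for (int i = 0; i < 4; i++) {
          uncompressedBytesWithHeader[pos + i] =
              (byte) (unencodedDataSizeWritten >>> (8 * (3 - i))); // big-endian like Bytes.putInt
        }
      }
    }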
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index a9f693b..5452ae5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
  */
 @InterfaceAudience.Private
 public interface DataBlockEncoder {
-
+// TODO: This Interface should be deprecated and replaced. It presumes hfile and carnal knowledge of
+// Cell internals. It was done for a different time. Remove. Purge.
   /**
    * Starts encoding for a block of KeyValues. Call
    * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[])} to finish

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index e79ba35..8fe81eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -82,8 +82,14 @@ public class CacheConfig {
    */
 
   /**
-   * If the chosen ioengine can persist its state across restarts, the path to the file to
-   * persist to.
+   * If the chosen ioengine can persist its state across restarts, the path to the file to persist
+   * to. This file is NOT the data file. It is a file into which we will serialize the map of
+   * what is in the data file. For example, if you pass the following argument as
+   * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"),
+   * <code>file:/tmp/bucketcache.data</code>, then we will write the bucketcache data to the file
+   * <code>/tmp/bucketcache.data</code> but the metadata on where the data is in the supplied file
+   * is an in-memory map that needs to be persisted across restarts. Where to store this
+   * in-memory state is what you supply here: e.g. <code>/tmp/bucketcache.map</code>.
    */
   public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = 
       "hbase.bucketcache.persistent.path";

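A minimal sketch of the two settings working together; the paths are illustrative only and follow the example in the javadoc above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BucketCachePathsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The cache's data file: the ioengine itself.
        conf.set("hbase.bucketcache.ioengine", "file:/tmp/bucketcache.data");
        // Where the in-memory map of what is in the data file gets persisted across restarts.
        conf.set("hbase.bucketcache.persistent.path", "/tmp/bucketcache.map");
      }
    }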
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 3fb9554..3ee120f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -178,19 +178,20 @@ public class HFile {
    * The number of bytes per checksum.
    */
   public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
+
   // For measuring number of checksum failures
-  static final Counter checksumFailures = new Counter();
+  static final Counter CHECKSUM_FAILURES = new Counter();
 
-  // for test purpose
-  public static final Counter dataBlockReadCnt = new Counter();
+  // For tests. Gets incremented when we read a block whether from HDFS or from Cache.
+  public static final Counter DATABLOCK_READ_COUNT = new Counter();
 
   /**
    * Number of checksum verification failures. It also
    * clears the counter.
    */
   public static final long getChecksumFailuresCount() {
-    long count = checksumFailures.get();
-    checksumFailures.set(0);
+    long count = CHECKSUM_FAILURES.get();
+    CHECKSUM_FAILURES.set(0);
     return count;
   }
 

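Note the read-and-reset semantics of getChecksumFailuresCount: each call returns the failures seen since the previous call and zeroes the counter. A usage sketch (the surrounding polling logic is hypothetical):

    // Each call returns failures since the last call, then clears the counter.
    long newFailures = HFile.getChecksumFailuresCount();
    if (newFailures > 0) {
      // e.g. feed a metrics gauge or log a warning; detail is up to the caller.
    }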
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 6cd7b20..af3dd49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -27,6 +27,8 @@ import java.nio.ByteBuffer;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -51,78 +53,112 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
- * Reading {@link HFile} version 1 and 2 blocks, and writing version 2 blocks.
- * <ul>
- * <li>In version 1 all blocks are always compressed or uncompressed, as
+ * Reads {@link HFile} version 1 and version 2 blocks but writes version 2 blocks only.
+ * Version 2 was introduced in hbase-0.92.0. Handles reading and writing out to the filesystem
+ * but also reading from and writing to the Cache.
+ *
+ * <h3>HFileBlock: Version 1</h3>
+ * As of this writing, there should be no more version 1 blocks found out in the wild. Version 2
+ * was introduced in hbase-0.92.0.
+ * In version 1 all blocks are always compressed or uncompressed, as
  * specified by the {@link HFile}'s compression algorithm, with a type-specific
  * magic record stored in the beginning of the compressed data (i.e. one needs
  * to uncompress the compressed block to determine the block type). There is
  * only a single compression algorithm setting for all blocks. Offset and size
  * information from the block index are required to read a block.
- * <li>In version 2 a block is structured as follows:
+ * <h3>HFileBlock: Version 2</h3>
+ * In version 2, a block is structured as follows:
  * <ul>
- * <li>header (see Writer#finishBlock())
+ * <li><b>Header:</b> See Writer#putHeader() (header total size is HFILEBLOCK_HEADER_SIZE)
  * <ul>
- * <li>Magic record identifying the block type (8 bytes)
- * <li>Compressed block size, excluding header, including checksum (4 bytes)
- * <li>Uncompressed block size, excluding header, excluding checksum (4 bytes)
+ * <li>Magic record identifying the {@link BlockType} (8 bytes): e.g. <code>DATABLK*</code>
+ * <li>Compressed -- a.k.a 'on disk' -- block size, excluding header, but including
+ *     tailing checksum bytes (4 bytes)
+ * <li>Uncompressed block size, excluding header, and excluding checksum bytes (4 bytes)
  * <li>The offset of the previous block of the same type (8 bytes). This is
- * used to be able to navigate to the previous block without going to the block
+ * used to navigate to the previous block without having to go to the block index
  * <li>For minorVersions &gt;=1, the ordinal describing checksum type (1 byte)
  * <li>For minorVersions &gt;=1, the number of data bytes/checksum chunk (4 bytes)
- * <li>For minorVersions &gt;=1, the size of data on disk, including header,
+ * <li>For minorVersions &gt;=1, the size of data 'on disk', including header,
  * excluding checksums (4 bytes)
  * </ul>
  * </li>
- * <li>Raw/Compressed/Encrypted/Encoded data. The compression algorithm is the
+ * <li><b>Raw/Compressed/Encrypted/Encoded data:</b> The compression algorithm is the
  * same for all the blocks in the {@link HFile}, similarly to what was done in
- * version 1.
- * <li>For minorVersions &gt;=1, a series of 4 byte checksums, one each for
+ * version 1. If compression is NONE, this is just raw, serialized Cells.
+ * <li><b>Tail:</b> For minorVersions &gt;=1, a series of 4 byte checksums, one each for
  * the number of bytes specified by bytesPerChecksum.
  * </ul>
- * </ul>
+ * <p>Be aware that when we read from HDFS, we overread, pulling in the next block's header too.
+ * We do this to save having to do two seeks to read an HFileBlock; a seek to read the header
+ * to figure lengths, etc., and then another seek to pull in the data.
  */
 @InterfaceAudience.Private
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="HE_EQUALS_USE_HASHCODE",
   justification="Fix!!! Fine for now bug FIXXXXXXX!!!!")
 public class HFileBlock implements Cacheable {
+  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 
   /**
-   * On a checksum failure on a Reader, these many suceeding read
-   * requests switch back to using hdfs checksums before auto-reenabling
-   * hbase checksum verification.
+   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
+   * auto-reenabling hbase checksum verification.
    */
   static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;
 
+  private static int UNSET = -1;
   public static final boolean FILL_HEADER = true;
   public static final boolean DONT_FILL_HEADER = false;
 
-  /**
-   * The size of block header when blockType is {@link BlockType#ENCODED_DATA}.
-   * This extends normal header by adding the id of encoder.
-   */
-  public static final int ENCODED_HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE
-      + DataBlockEncoding.ID_SIZE;
-
-  static final byte[] DUMMY_HEADER_NO_CHECKSUM =
-     new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM];
-
   public static final int BYTE_BUFFER_HEAP_SIZE = (int) ClassSize.estimateBase(
       ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false);
 
-  // meta.usesHBaseChecksum+offset+nextBlockOnDiskSizeWithHeader
-  public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT
-      + Bytes.SIZEOF_LONG;
+  /**
+   * See #blockDeserializer method for more info.
+   * 13 bytes of extra stuff stuck on the end of the HFileBlock that we pull in from HDFS (note,
+   * when we read from HDFS, we pull in an HFileBlock AND the header of the next block if there is one).
+   * The 13 bytes are: usesHBaseChecksum (1 byte) + offset of this block (long) +
+   * nextBlockOnDiskSizeWithHeader (int).
+   */
+  public static final int EXTRA_SERIALIZATION_SPACE =
+      Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG;
 
   /**
    * Each checksum value is an integer that can be stored in 4 bytes.
    */
   static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT;
 
+  static final byte[] DUMMY_HEADER_NO_CHECKSUM =
+      new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM];
+
+  /**
+   * Used deserializing blocks from Cache.
+   *
+   * Serializing to cache is a little hard to follow. See Writer#finishBlock for where it is done.
+   * When we start to append to a new HFileBlock,
+   * we skip over where the header should go before we start adding Cells. When the block is
+   * done, we'll then go back and fill in the header and the checksum tail. Be aware that what
+   * gets serialized into the blockcache is a byte array that contains an HFileBlock followed by
+   * its checksums and then the header of the next HFileBlock (needed to help navigate), followed
+   * again by an extra 13 bytes of meta info needed when time to recreate the HFileBlock from cache.
+   *
+   * ++++++++++++++
+   * + HFileBlock +
+   * ++++++++++++++
+   * + Checksums  +
+   * ++++++++++++++
+   * + NextHeader +
+   * ++++++++++++++
+   * + ExtraMeta! +
+   * ++++++++++++++
+   *
+   * TODO: Fix it so we do NOT put the NextHeader into blockcache. It is not necessary.
+   */
   static final CacheableDeserializer<Cacheable> blockDeserializer =
       new CacheableDeserializer<Cacheable>() {
         public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException{
+          // Rewind to just before the EXTRA_SERIALIZATION_SPACE.
           buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind();
+          // Get a new buffer to pass the deserialized HFileBlock for it to 'own'.
           ByteBuffer newByteBuffer;
           if (reuse) {
             newByteBuffer = buf.slice();
@@ -130,6 +166,7 @@ public class HFileBlock implements Cacheable {
            newByteBuffer = ByteBuffer.allocate(buf.limit());
            newByteBuffer.put(buf);
           }
+          // Read out the EXTRA_SERIALIZATION_SPACE content and shove into our HFileBlock.
           buf.position(buf.limit());
           buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE);
           boolean usesChecksum = buf.get() == (byte)1;
@@ -161,18 +198,28 @@ public class HFileBlock implements Cacheable {
   /** Type of block. Header field 0. */
   private BlockType blockType;
 
-  /** Size on disk excluding header, including checksum. Header field 1. */
+  /**
+   * Size on disk excluding header, including checksum. Header field 1.
+   * @see Writer#putHeader(byte[], int, int, int, int)
+   */
   private int onDiskSizeWithoutHeader;
 
-  /** Size of pure data. Does not include header or checksums. Header field 2. */
+  /**
+   * Size of pure data. Does not include header or checksums. Header field 2.
+   * @see Writer#putHeader(byte[], int, int, int, int)
+   */
   private final int uncompressedSizeWithoutHeader;
 
-  /** The offset of the previous block on disk. Header field 3. */
+  /**
+   * The offset of the previous block on disk. Header field 3.
+   * @see Writer#putHeader(byte[], int, int, int, int)
+   */
   private final long prevBlockOffset;
 
   /**
    * Size on disk of header + data. Excludes checksum. Header field 6,
    * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
+   * @see Writer#putHeader(byte[], int, int, int, int)
    */
   private final int onDiskDataSizeWithHeader;
 
@@ -186,7 +233,7 @@ public class HFileBlock implements Cacheable {
    * The offset of this block in the file. Populated by the reader for
    * convenience of access. This offset is not part of the block header.
    */
-  private long offset = -1;
+  private long offset = UNSET;
 
   /**
    * The on-disk size of the next block, including the header, obtained by
@@ -197,7 +244,7 @@ public class HFileBlock implements Cacheable {
 
   /**
    * Creates a new {@link HFile} block from the given fields. This constructor
-   * is mostly used when the block data has already been read and uncompressed,
+   * is used when the block data has already been read and uncompressed,
    * and is sitting in a byte buffer.
    *
    * @param blockType the type of this block, see {@link BlockType}
@@ -205,8 +252,8 @@ public class HFileBlock implements Cacheable {
    * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
    * @param prevBlockOffset see {@link #prevBlockOffset}
    * @param buf block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) followed by
-   *          uncompressed data. This
-   * @param fillHeader when true, parse {@code buf} and override the first 4 header fields.
+   *          uncompressed data.
+   * @param fillHeader when true, write the first 4 header fields into passed buffer.
    * @param offset the file offset the block was read from
    * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
    * @param fileContext HFile meta data
@@ -222,8 +269,9 @@ public class HFileBlock implements Cacheable {
     this.offset = offset;
     this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
     this.fileContext = fileContext;
-    if (fillHeader)
+    if (fillHeader) {
       overwriteHeader();
+    }
     this.buf.rewind();
   }
 
@@ -265,8 +313,8 @@ public class HFileBlock implements Cacheable {
     } else {
       contextBuilder.withChecksumType(ChecksumType.NULL);
       contextBuilder.withBytesPerCheckSum(0);
-      this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
-                                       HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
+      this.onDiskDataSizeWithHeader =
+          onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
     }
     this.fileContext = contextBuilder.build();
     buf = b;
@@ -296,14 +344,14 @@ public class HFileBlock implements Cacheable {
   /**
    * @return the on-disk size of the data part + checksum (header excluded).
    */
-  public int getOnDiskSizeWithoutHeader() {
+  int getOnDiskSizeWithoutHeader() {
     return onDiskSizeWithoutHeader;
   }
 
   /**
    * @return the uncompressed size of data part (header and checksum excluded).
    */
-   public int getUncompressedSizeWithoutHeader() {
+  public int getUncompressedSizeWithoutHeader() {
     return uncompressedSizeWithoutHeader;
   }
 
@@ -311,7 +359,7 @@ public class HFileBlock implements Cacheable {
    * @return the offset of the previous block of the same type in the file, or
    *         -1 if unknown
    */
-  public long getPrevBlockOffset() {
+  long getPrevBlockOffset() {
     return prevBlockOffset;
   }
 
@@ -347,9 +395,9 @@ public class HFileBlock implements Cacheable {
   /**
    * Returns the buffer this block stores internally. The clients must not
    * modify the buffer object. This method has to be public because it is
-   * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} 
-   * to avoid object creation on every Bloom filter lookup, but has to 
-   * be used with caution. Checksum data is not included in the returned 
+   * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter}
+   * to avoid object creation on every Bloom filter lookup, but has to
+   * be used with caution. Checksum data is not included in the returned
    * buffer but header data is.
    *
    * @return the buffer of this block for read-only operations
@@ -445,20 +493,20 @@ public class HFileBlock implements Cacheable {
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder()
-      .append("HFileBlock [")
-      .append(" fileOffset=").append(offset)
-      .append(" headerSize()=").append(headerSize())
-      .append(" blockType=").append(blockType)
-      .append(" onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader)
-      .append(" uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader)
-      .append(" prevBlockOffset=").append(prevBlockOffset)
-      .append(" isUseHBaseChecksum()=").append(fileContext.isUseHBaseChecksum());
+      .append("[")
+      .append("blockType=").append(blockType)
+      .append(", fileOffset=").append(offset)
+      .append(", headerSize=").append(headerSize())
+      .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader)
+      .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader)
+      .append(", prevBlockOffset=").append(prevBlockOffset)
+      .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum());
     if (fileContext.isUseHBaseChecksum()) {
-      sb.append(" checksumType=").append(ChecksumType.codeToType(this.buf.get(24)))
-        .append(" bytesPerChecksum=").append(this.buf.getInt(24 + 1))
-        .append(" onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader);
+      sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24)))
+        .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1))
+        .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader);
     } else {
-      sb.append(" onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader)
+      sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader)
         .append("(").append(onDiskSizeWithoutHeader)
         .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")");
     }
@@ -473,13 +521,13 @@ public class HFileBlock implements Cacheable {
       bufWithoutHeader.get(dataBeginBytes);
       dataBegin = Bytes.toStringBinary(dataBeginBytes);
     }
-    sb.append(" getOnDiskSizeWithHeader()=").append(getOnDiskSizeWithHeader())
-      .append(" totalChecksumBytes()=").append(totalChecksumBytes())
-      .append(" isUnpacked()=").append(isUnpacked())
-      .append(" buf=[ ").append(buf).append(" ]")
-      .append(" dataBeginsWith=").append(dataBegin)
-      .append(" fileContext=").append(fileContext)
-      .append(" ]");
+    sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader())
+      .append(", totalChecksumBytes=").append(totalChecksumBytes())
+      .append(", isUnpacked=").append(isUnpacked())
+      .append(", buf=[").append(buf).append("]")
+      .append(", dataBeginsWith=").append(dataBegin)
+      .append(", fileContext=").append(fileContext)
+      .append("]");
     return sb.toString();
   }
 
@@ -607,19 +655,8 @@ public class HFileBlock implements Cacheable {
     }
   }
 
-  /**
-   * @param expectedType the expected type of this block
-   * @throws IOException if this block's type is different than expected
-   */
-  public void expectType(BlockType expectedType) throws IOException {
-    if (blockType != expectedType) {
-      throw new IOException("Invalid block type: expected=" + expectedType
-          + ", actual=" + blockType);
-    }
-  }
-
   /** @return the offset of this block in the file it was read from */
-  public long getOffset() {
+  long getOffset() {
     if (offset < 0) {
       throw new IllegalStateException(
           "HFile block offset not initialized properly");
@@ -660,21 +697,20 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Read from an input stream. Analogous to
+   * Read from an input stream at least <code>necessaryLen</code> and if possible,
+   * <code>extraLen</code> also if available. Analogous to
    * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a
-   * number of "extra" bytes that would be desirable but not absolutely
-   * necessary to read.
+   * number of "extra" bytes to also optionally read.
    *
    * @param in the input stream to read from
    * @param buf the buffer to read into
    * @param bufOffset the destination offset in the buffer
-   * @param necessaryLen the number of bytes that are absolutely necessary to
-   *          read
+   * @param necessaryLen the number of bytes that are absolutely necessary to read
    * @param extraLen the number of extra bytes that would be nice to read
    * @return true if succeeded reading the extra bytes
    * @throws IOException if failed to read the necessary bytes
    */
-  public static boolean readWithExtra(InputStream in, byte[] buf,
+  static boolean readWithExtra(InputStream in, byte[] buf,
       int bufOffset, int necessaryLen, int extraLen) throws IOException {
     int bytesRemaining = necessaryLen + extraLen;
     while (bytesRemaining > 0) {
@@ -698,7 +734,8 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Read from an input stream. Analogous to
+   * Read from an input stream at least <code>necessaryLen</code> and if possible,
+   * <code>extraLen</code> also if available. Analogous to
    * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses
    * positional read and specifies a number of "extra" bytes that would be
    * desirable but not absolutely necessary to read.
@@ -751,14 +788,13 @@ public class HFileBlock implements Cacheable {
    * <li>Construct an {@link HFileBlock.Writer}, providing a compression algorithm.
    * <li>Call {@link Writer#startWriting} and get a data stream to write to.
    * <li>Write your data into the stream.
-   * <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need to.
+   * <li>Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to
    * store the serialized block into an external stream.
    * <li>Repeat to write more blocks.
    * </ol>
    * <p>
    */
   public static class Writer {
-
     private enum State {
       INIT,
       WRITING,
@@ -773,7 +809,7 @@ public class HFileBlock implements Cacheable {
 
     private HFileBlockEncodingContext dataBlockEncodingCtx;
 
-    /** block encoding context for non-data blocks */
+    /** block encoding context for non-data blocks*/
     private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx;
 
     /**
@@ -846,25 +882,26 @@ public class HFileBlock implements Cacheable {
      * @param dataBlockEncoder data block encoding algorithm to use
      */
     public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
-      this.dataBlockEncoder = dataBlockEncoder != null
-          ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
-      defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-      dataBlockEncodingCtx = this.dataBlockEncoder
-          .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-
       if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
         throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
             " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
             fileContext.getBytesPerChecksum());
       }
-
+      this.dataBlockEncoder = dataBlockEncoder != null?
+          dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE;
+      this.dataBlockEncodingCtx = this.dataBlockEncoder.
+          newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+      // TODO: This should be lazily instantiated since we usually do NOT need this default encoder
+      this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
+          HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+      // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum
       baosInMemory = new ByteArrayOutputStream();
-
       prevOffsetByType = new long[BlockType.values().length];
-      for (int i = 0; i < prevOffsetByType.length; ++i)
-        prevOffsetByType[i] = -1;
-
+      for (int i = 0; i < prevOffsetByType.length; ++i) {
+        prevOffsetByType[i] = UNSET;
+      }
+      // TODO: Why fileContext saved away when we have dataBlockEncoder and/or
+      // defaultDataBlockEncoder?
       this.fileContext = fileContext;
     }
 
@@ -874,7 +911,7 @@ public class HFileBlock implements Cacheable {
      * @return the stream the user can write their data into
      * @throws IOException
      */
-    public DataOutputStream startWriting(BlockType newBlockType)
+    DataOutputStream startWriting(BlockType newBlockType)
         throws IOException {
       if (state == State.BLOCK_READY && startOffset != -1) {
         // We had a previous block that was written to a stream at a specific
@@ -904,10 +941,10 @@ public class HFileBlock implements Cacheable {
      * @param cell
      * @throws IOException
      */
-    public void write(Cell cell) throws IOException{
+    void write(Cell cell) throws IOException{
       expectState(State.WRITING);
-      this.unencodedDataSizeWritten += this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx,
-          this.userDataStream);
+      this.unencodedDataSizeWritten +=
+          this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream);
     }
 
     /**
@@ -954,6 +991,7 @@ public class HFileBlock implements Cacheable {
       }
       userDataStream.flush();
       // This does an array copy, so it is safe to cache this byte array.
+      // Header is still the empty, 'dummy' header that is yet to be filled out.
       uncompressedBytesWithHeader = baosInMemory.toByteArray();
       prevOffset = prevOffsetByType[blockType.getId()];
 
@@ -965,22 +1003,25 @@ public class HFileBlock implements Cacheable {
         onDiskBytesWithHeader = dataBlockEncodingCtx
             .compressAndEncrypt(uncompressedBytesWithHeader);
       } else {
-        onDiskBytesWithHeader = defaultBlockEncodingCtx
-            .compressAndEncrypt(uncompressedBytesWithHeader);
+        onDiskBytesWithHeader = this.defaultBlockEncodingCtx.
+            compressAndEncrypt(uncompressedBytesWithHeader);
       }
+      // Calculate how many bytes we need for checksum on the tail of the block.
       int numBytes = (int) ChecksumUtil.numBytes(
           onDiskBytesWithHeader.length,
           fileContext.getBytesPerChecksum());
 
-      // put the header for on disk bytes
+      // Put the header for the on disk bytes; header currently is unfilled-out
       putHeader(onDiskBytesWithHeader, 0,
           onDiskBytesWithHeader.length + numBytes,
           uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);
-      // set the header for the uncompressed bytes (for cache-on-write)
-      putHeader(uncompressedBytesWithHeader, 0,
+      // Set the header for the uncompressed bytes (for cache-on-write) -- IFF different from
+      // onDiskBytesWithHeader array.
+      if (onDiskBytesWithHeader != uncompressedBytesWithHeader) {
+        putHeader(uncompressedBytesWithHeader, 0,
           onDiskBytesWithHeader.length + numBytes,
           uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length);
-
+      }
       onDiskChecksum = new byte[numBytes];
       ChecksumUtil.generateChecksums(
           onDiskBytesWithHeader, 0, onDiskBytesWithHeader.length,
@@ -1027,9 +1068,9 @@ public class HFileBlock implements Cacheable {
      * @param out
      * @throws IOException
      */
-    public void writeHeaderAndData(FSDataOutputStream out) throws IOException {
+    void writeHeaderAndData(FSDataOutputStream out) throws IOException {
       long offset = out.getPos();
-      if (startOffset != -1 && offset != startOffset) {
+      if (startOffset != UNSET && offset != startOffset) {
         throw new IOException("A " + blockType + " block written to a "
             + "stream twice, first at offset " + startOffset + ", then at "
             + offset);
@@ -1082,7 +1123,7 @@ public class HFileBlock implements Cacheable {
     /**
      * Releases resources used by this writer.
      */
-    public void release() {
+    void release() {
       if (dataBlockEncodingCtx != null) {
         dataBlockEncodingCtx.close();
         dataBlockEncodingCtx = null;
@@ -1103,9 +1144,8 @@ public class HFileBlock implements Cacheable {
      */
     int getOnDiskSizeWithoutHeader() {
       expectState(State.BLOCK_READY);
-      return onDiskBytesWithHeader.length
-          + onDiskChecksum.length
-          - HConstants.HFILEBLOCK_HEADER_SIZE;
+      return onDiskBytesWithHeader.length +
+          onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE;
     }
 
     /**
@@ -1137,7 +1177,7 @@ public class HFileBlock implements Cacheable {
     }
 
     /** @return true if a block is being written  */
-    public boolean isWriting() {
+    boolean isWriting() {
       return state == State.WRITING;
     }
 
@@ -1148,7 +1188,7 @@ public class HFileBlock implements Cacheable {
      *
      * @return the number of bytes written
      */
-    public int blockSizeWritten() {
+    int blockSizeWritten() {
       if (state != State.WRITING) return 0;
       return this.unencodedDataSizeWritten;
     }
@@ -1196,7 +1236,7 @@ public class HFileBlock implements Cacheable {
      * @param out the file system output stream
      * @throws IOException
      */
-    public void writeBlock(BlockWritable bw, FSDataOutputStream out)
+    void writeBlock(BlockWritable bw, FSDataOutputStream out)
         throws IOException {
       bw.writeToBlock(startWriting(bw.getBlockType()));
       writeHeaderAndData(out);
@@ -1209,7 +1249,7 @@ public class HFileBlock implements Cacheable {
      * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a
      * 0 value in bytesPerChecksum.
      */
-    public HFileBlock getBlockForCaching(CacheConfig cacheConf) {
+    HFileBlock getBlockForCaching(CacheConfig cacheConf) {
       HFileContext newContext = new HFileContextBuilder()
                                 .withBlockSize(fileContext.getBlocksize())
                                 .withBytesPerCheckSum(0)
@@ -1232,7 +1272,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /** Something that can be written into a block. */
-  public interface BlockWritable {
+  interface BlockWritable {
 
     /** The type of block this data should use. */
     BlockType getBlockType();
@@ -1249,7 +1289,7 @@ public class HFileBlock implements Cacheable {
   // Block readers and writers
 
   /** An interface allowing to iterate {@link HFileBlock}s. */
-  public interface BlockIterator {
+  interface BlockIterator {
 
     /**
      * Get the next block, or null if there are no more blocks to iterate.
@@ -1264,7 +1304,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /** A full-fledged reader with iteration ability. */
-  public interface FSReader {
+  interface FSReader {
 
     /**
      * Reads the block at the given offset in the file with the given on-disk
@@ -1371,8 +1411,8 @@ public class HFileBlock implements Cacheable {
      * the on-disk size of the next block, or -1 if it could not be determined.
      *
      * @param dest destination buffer
-     * @param destOffset offset in the destination buffer
-     * @param size size of the block to be read
+     * @param destOffset offset into the destination buffer where to start putting the bytes read
+     * @param size size of read
      * @param peekIntoNextBlock whether to read the next block's on-disk size
      * @param fileOffset position in the stream to read at
      * @param pread whether we should do a positional read
@@ -1381,12 +1421,10 @@ public class HFileBlock implements Cacheable {
      *         -1 if it could not be determined
      * @throws IOException
      */
-    protected int readAtOffset(FSDataInputStream istream,
-        byte[] dest, int destOffset, int size,
+    protected int readAtOffset(FSDataInputStream istream, byte [] dest, int destOffset, int size,
         boolean peekIntoNextBlock, long fileOffset, boolean pread)
-        throws IOException {
-      if (peekIntoNextBlock &&
-          destOffset + size + hdrSize > dest.length) {
+    throws IOException {
+      if (peekIntoNextBlock && destOffset + size + hdrSize > dest.length) {
         // We are asked to read the next block's header as well, but there is
         // not enough room in the array.
         throw new IOException("Attempted to read " + size + " bytes and " +
@@ -1440,9 +1478,15 @@ public class HFileBlock implements Cacheable {
     long offset = -1;
     byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
     final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
+    @Override
+    public String toString() {
+      return "offset=" + this.offset + ", header=" + Bytes.toStringBinary(header);
+    }
   }
 
-  /** Reads version 2 blocks from the filesystem. */
+  /**
+   * Reads version 2 blocks from the filesystem.
+   */
   static class FSReaderImpl extends AbstractFSReader {
     /** The file system stream of the underlying {@link HFile} that
      * does or doesn't do checksum validations in the filesystem */
@@ -1523,7 +1567,7 @@ public class HFileBlock implements Cacheable {
           HFile.LOG.warn(msg);
           throw new IOException(msg); // cannot happen case here
         }
-        HFile.checksumFailures.increment(); // update metrics
+        HFile.CHECKSUM_FAILURES.increment(); // update metrics
 
         // If we have a checksum failure, we fall back into a mode where
         // the next few reads use HDFS level checksums. We aim to make the
@@ -1598,6 +1642,7 @@ public class HFileBlock implements Cacheable {
       }
 
       int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL;
+
       // See if we can avoid reading the header. This is desirable, because
       // we will not incur a backward seek operation if we have already
       // read this block's header as part of the previous read's look-ahead.
@@ -1605,15 +1650,22 @@ public class HFileBlock implements Cacheable {
       // been read.
       // TODO: How often does this optimization fire? Has to be same thread so the thread local
       // is pertinent and we have to be reading next block as in a big scan.
+      ByteBuffer headerBuf = null;
       PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get();
-      ByteBuffer headerBuf = prefetchedHeader.offset == offset? prefetchedHeader.buf: null;
-
+      boolean preReadHeader = false;
+      if (prefetchedHeader != null && prefetchedHeader.offset == offset) {
+        headerBuf = prefetchedHeader.buf;
+        preReadHeader = true;
+      }
       // Allocate enough space to fit the next block's header too.
       int nextBlockOnDiskSize = 0;
       byte[] onDiskBlock = null;
 
       HFileBlock b = null;
+      boolean fastPath = false;
+      boolean readHdrOnly = false;
       if (onDiskSizeWithHeader > 0) {
+        fastPath = true;
         // We know the total on-disk size. Read the entire block into memory,
         // then parse the header. This code path is used when
         // doing a random read operation relying on the block index, as well as
@@ -1670,6 +1722,7 @@ public class HFileBlock implements Cacheable {
         // Unfortunately, we still have to do a separate read operation to
         // read the header.
         if (headerBuf == null) {
+          readHdrOnly = true;
           // From the header, determine the on-disk size of the given hfile
           // block, and read the remaining data, thereby incurring two read
           // operations. This might happen when we are doing the first read
@@ -1682,12 +1735,12 @@ public class HFileBlock implements Cacheable {
         }
         // TODO: FIX!!! Expensive parse just to get a length
         b = new HFileBlock(headerBuf, fileContext.isUseHBaseChecksum());
+        // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header
         onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize];
-        // headerBuf is HBB
+        // headerBuf is HBB. Copy hdr into onDiskBlock
         System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
-        nextBlockOnDiskSize =
-          readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader()
-              - hdrSize, true, offset + hdrSize, pread);
+        nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, hdrSize,
+            b.getOnDiskSizeWithHeader() - hdrSize, true, offset + hdrSize, pread);
         onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize;
       }
 
@@ -1717,6 +1770,10 @@ public class HFileBlock implements Cacheable {
       b.offset = offset;
       b.fileContext.setIncludesTags(this.fileContext.isIncludesTags());
       b.fileContext.setIncludesMvcc(this.fileContext.isIncludesMvcc());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Read preReadHeader=" + preReadHeader + ", fastPath=" + fastPath +
+            ", readHdrOnly=" + readHdrOnly + ", " + b);
+      }
       return b;
     }
 
@@ -1778,6 +1835,9 @@ public class HFileBlock implements Cacheable {
     serializeExtraInfo(destination);
   }
 
+  /**
+   * Write out the content of EXTRA_SERIALIZATION_SPACE. Public so it can be accessed by BucketCache.
+   */
   public void serializeExtraInfo(ByteBuffer destination) {
     destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
     destination.putLong(this.offset);
@@ -1850,7 +1910,7 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Calcuate the number of bytes required to store all the checksums
+   * Calculate the number of bytes required to store all the checksums
    * for this block. Each checksum value is a 4 byte integer.
    */
   int totalChecksumBytes() {
@@ -1876,16 +1936,14 @@ public class HFileBlock implements Cacheable {
    * Maps a minor version to the size of the header.
    */
   public static int headerSize(boolean usesHBaseChecksum) {
-    if (usesHBaseChecksum) {
-      return HConstants.HFILEBLOCK_HEADER_SIZE;
-    }
-    return HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
+    return usesHBaseChecksum?
+        HConstants.HFILEBLOCK_HEADER_SIZE: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
   }
 
   /**
    * Return the appropriate DUMMY_HEADER for the minor version
    */
-  public byte[] getDummyHeaderForVersion() {
+  byte[] getDummyHeaderForVersion() {
     return getDummyHeaderForVersion(this.fileContext.isUseHBaseChecksum());
   }
 
@@ -1893,17 +1951,14 @@ public class HFileBlock implements Cacheable {
    * Return the appropriate DUMMY_HEADER for the minor version
    */
   static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) {
-    if (usesHBaseChecksum) {
-      return HConstants.HFILEBLOCK_DUMMY_HEADER;
-    }
-    return DUMMY_HEADER_NO_CHECKSUM;
+    return usesHBaseChecksum? HConstants.HFILEBLOCK_DUMMY_HEADER: DUMMY_HEADER_NO_CHECKSUM;
   }
 
   /**
    * @return the HFileContext used to create this HFileBlock. Not necessary the
    * fileContext for the file from which this block's data was originally read.
    */
-  public HFileContext getHFileContext() {
+  HFileContext getHFileContext() {
     return this.fileContext;
   }
 

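To make the EXTRA_SERIALIZATION_SPACE bookkeeping concrete, a standalone sketch, using only java.nio, of the 13-byte tail (usesHBaseChecksum byte, then the block offset as a long, then the next-block-on-disk size as an int) that serializeExtraInfo writes behind the block bytes and blockDeserializer reads back; the values here are made up:

    import java.nio.ByteBuffer;

    public class ExtraSerializationSketch {
      // usesHBaseChecksum (1) + offset (8) + nextBlockOnDiskSizeWithHeader (4) = 13 bytes
      static final int EXTRA_SERIALIZATION_SPACE = 1 + 8 + 4;

      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(EXTRA_SERIALIZATION_SPACE);
        // Mirrors what serializeExtraInfo writes.
        buf.put((byte) 1);        // usesHBaseChecksum
        buf.putLong(42L);         // offset of this block in the file (hypothetical)
        buf.putInt(33 + 1024);    // on-disk size of next block, header included (hypothetical)
        buf.flip();
        // Mirrors what blockDeserializer does once positioned past the block bytes.
        boolean usesChecksum = buf.get() == (byte) 1;
        long offset = buf.getLong();
        int nextBlockOnDiskSize = buf.getInt();
        System.out.println(usesChecksum + " " + offset + " " + nextBlockOnDiskSize);
      }
    }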
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index 3bd5ac3..f9a9a5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -425,7 +425,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
             assert cachedBlock.isUnpacked() : "Packed block leak.";
             if (cachedBlock.getBlockType().isData()) {
               if (updateCacheMetrics) {
-                HFile.dataBlockReadCnt.increment();
+                HFile.DATABLOCK_READ_COUNT.increment();
               }
               // Validate encoding type for data blocks. We include encoding
               // type in the cache key, and we expect it to match on a cache hit.
@@ -464,7 +464,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
         }
 
         if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
-          HFile.dataBlockReadCnt.increment();
+          HFile.DATABLOCK_READ_COUNT.increment();
         }
 
         return unpacked;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 8c0d0b5..fedfd20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -270,9 +270,10 @@ public final class BucketAllocator {
     }
   }
 
-  // Default block size is 64K, so we choose more sizes near 64K, you'd better
+  // Default block size in hbase is 64K, so we choose more sizes near 64K; you'd better
   // reset it according to your cluster's block size distribution
   // TODO Support the view of block size distribution statistics
+  // TODO: Why do we add the extra 1024 bytes? Slop?
   private static final int DEFAULT_BUCKET_SIZES[] = { 4 * 1024 + 1024, 8 * 1024 + 1024,
       16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, 48 * 1024 + 1024,
       56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024,
@@ -290,6 +291,9 @@ public final class BucketAllocator {
     return null;
   }
 
+  /**
+   * So, what is the minimum number of items we'll tolerate in a single bucket?
+   */
   static public final int FEWEST_ITEMS_IN_BUCKET = 4;
 
   private final int[] bucketSizes;
@@ -309,9 +313,8 @@ public final class BucketAllocator {
     this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * bigItemSize;
     buckets = new Bucket[(int) (availableSpace / bucketCapacity)];
     if (buckets.length < this.bucketSizes.length)
-      throw new BucketAllocatorException(
-          "Bucket allocator size too small - must have room for at least "
-              + this.bucketSizes.length + " buckets");
+      throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length +
+        "); must have room for at least " + this.bucketSizes.length + " buckets");
     bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length];
     for (int i = 0; i < this.bucketSizes.length; ++i) {
       bucketSizeInfos[i] = new BucketSizeInfo(i);
@@ -322,6 +325,12 @@ public final class BucketAllocator {
           .instantiateBucket(buckets[i]);
     }
     this.totalSize = ((long) buckets.length) * bucketCapacity;
+    if (LOG.isInfoEnabled()) {
+      LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length +
+        ", bucket capacity=" + this.bucketCapacity +
+        "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + this.bigItemSize + ")=" +
+        "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))");
+    }
   }
 
   /**

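The new INFO line spells out the sizing arithmetic from the constructor: bucketCapacity = FEWEST_ITEMS_IN_BUCKET * bigItemSize, and the bucket count is availableSpace / bucketCapacity. A worked sketch with illustrative numbers (in reality bigItemSize is the largest configured bucket size):

    // Hypothetical numbers only.
    long availableSpace = 1024L * 1024 * 1024;                 // 1G of cache
    int bigItemSize = 128 * 1024 + 1024;                       // one of the sizes above
    long bucketCapacity = 4L * bigItemSize;                    // FEWEST_ITEMS_IN_BUCKET * bigItemSize
    int numBuckets = (int) (availableSpace / bucketCapacity);  // 2032 buckets
    long totalSize = numBuckets * bucketCapacity;              // what the INFO line reports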
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 4699262..3f4640d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -75,7 +75,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses
- * {@link BucketCache#ramCache} and {@link BucketCache#backingMap} in order to
+ * BucketCache#ramCache and BucketCache#backingMap in order to
  * determine if a given element is in the cache. The bucket cache can use on-heap or
  * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to
  * store/read the block data.
@@ -84,7 +84,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
  *
  * <p>BucketCache can be used as mainly a block cache (see
- * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}),
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
 * LruBlockCache to decrease CMS GC and heap fragmentation.
  *
  * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
@@ -343,6 +343,7 @@ public class BucketCache implements BlockCache, HeapSize {
    */
   public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
       boolean wait) {
+    if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
     if (!cacheEnabled) {
       return;
     }
@@ -414,6 +415,9 @@ public class BucketCache implements BlockCache, HeapSize {
         // existence here.
         if (bucketEntry.equals(backingMap.get(key))) {
           int len = bucketEntry.getLength();
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
+          }
           ByteBuffer bb = ByteBuffer.allocate(len);
           int lenRead = ioEngine.read(bb, bucketEntry.offset());
           if (lenRead != len) {
@@ -565,7 +569,9 @@ public class BucketCache implements BlockCache, HeapSize {
    */
   private void freeSpace(final String why) {
     // Ensure only one freeSpace progress at a time
-    if (!freeSpaceLock.tryLock()) return;
+    if (!freeSpaceLock.tryLock()) {
+      return;
+    }
     try {
       freeInProgress = true;
       long bytesToFreeWithoutExtra = 0;
@@ -594,7 +600,7 @@ public class BucketCache implements BlockCache, HeapSize {
         return;
       }
       long currentSize = bucketAllocator.getUsedSize();
-      long totalSize=bucketAllocator.getTotalSize();
+      long totalSize = bucketAllocator.getTotalSize();
       if (LOG.isDebugEnabled() && msgBuffer != null) {
         LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() +
           " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" +
@@ -799,7 +805,7 @@ public class BucketCache implements BlockCache, HeapSize {
         }
       }
 
-      // Make sure data pages are written are on media before we update maps.
+      // Make sure data pages are written on media before we update maps.
       try {
         ioEngine.sync();
       } catch (IOException ioex) {
@@ -873,9 +879,9 @@ public class BucketCache implements BlockCache, HeapSize {
     FileOutputStream fos = null;
     ObjectOutputStream oos = null;
     try {
-      if (!ioEngine.isPersistent())
-        throw new IOException(
-            "Attempt to persist non-persistent cache mappings!");
+      if (!ioEngine.isPersistent()) {
+        throw new IOException("Attempt to persist non-persistent cache mappings!");
+      }
       fos = new FileOutputStream(persistencePath, false);
       oos = new ObjectOutputStream(fos);
       oos.writeLong(cacheCapacity);
@@ -955,19 +961,17 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   /**
-   * Used to shut down the cache -or- turn it off in the case of something
-   * broken.
+   * Used to shut down the cache -or- turn it off in the case of something broken.
    */
   private void disableCache() {
-    if (!cacheEnabled)
-      return;
+    if (!cacheEnabled) return;
     cacheEnabled = false;
     ioEngine.shutdown();
     this.scheduleThreadPool.shutdown();
-    for (int i = 0; i < writerThreads.length; ++i)
-      writerThreads[i].interrupt();
+    for (int i = 0; i < writerThreads.length; ++i) writerThreads[i].interrupt();
     this.ramCache.clear();
     if (!ioEngine.isPersistent() || persistencePath == null) {
+      // If persistent ioengine and a path, we will serialize out the backingMap.
       this.backingMap.clear();
     }
   }
@@ -1255,6 +1259,9 @@ public class BucketCache implements BlockCache, HeapSize {
             len == sliceBuf.limit() + block.headerSize() + HFileBlock.EXTRA_SERIALIZATION_SPACE;
           ByteBuffer extraInfoBuffer = ByteBuffer.allocate(HFileBlock.EXTRA_SERIALIZATION_SPACE);
           block.serializeExtraInfo(extraInfoBuffer);
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Write offset=" + offset + ", len=" + len);
+          }
           ioEngine.write(sliceBuf, offset);
           ioEngine.write(extraInfoBuffer, offset + len - HFileBlock.EXTRA_SERIALIZATION_SPACE);
         } else {

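The three trace additions above share one idiom: test LOG.isTraceEnabled() before
building the message, so the string concatenation costs nothing when trace output is
off. A minimal, self-contained sketch of the pattern (class and method names here are
illustrative, not part of the patch):

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class TraceGuardSketch {
    private static final Log LOG = LogFactory.getLog(TraceGuardSketch.class);

    void cacheBlock(Object cacheKey, Object cachedItem) {
      // Guard first: the concatenation below is evaluated only when
      // trace-level output is actually enabled.
      if (LOG.isTraceEnabled()) {
        LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
      }
      // ... caching work would follow here ...
    }
  }
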
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 01a0c9c..f5d9dde 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -37,7 +37,6 @@ import java.util.SortedSet;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
index 058280d..d359f3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java
@@ -83,7 +83,7 @@ public class TestFilterListOrOperatorWithBlkCnt {
   }
 
   private static long getBlkAccessCount() {
-    return HFile.dataBlockReadCnt.get();
+    return HFile.DATABLOCK_READ_COUNT.get();
   }
 
   @Test

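The dataBlockReadCnt -> DATABLOCK_READ_COUNT rename applies the usual Java convention
of upper-case names for statics. Judging from the get() calls here (and the set(0)
reset in TestForceCacheImportantBlocks below), the counter behaves like a static
AtomicLong; a sketch of that shape, with the increment site invented for illustration:

  import java.util.concurrent.atomic.AtomicLong;

  public class ReadCounterSketch {
    // Upper-case name marks the static; AtomicLong keeps updates safe across
    // concurrent reader threads.
    public static final AtomicLong DATABLOCK_READ_COUNT = new AtomicLong(0);

    static void onDataBlockReadFromDisk() {
      DATABLOCK_READ_COUNT.incrementAndGet();
    }
  }

Tests then reset the counter in setup and assert on its value after a read, exactly as
the hunks below do.
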
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 1995aa4..5c11818 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -72,6 +72,7 @@ public class TestDataBlockEncoders {
 
   private static int ENCODED_DATA_OFFSET = HConstants.HFILEBLOCK_HEADER_SIZE
       + DataBlockEncoding.ID_SIZE;
+  static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
 
   private RedundantKVGenerator generator = new RedundantKVGenerator();
   private Random randomizer = new Random(42l);
@@ -87,7 +88,7 @@ public class TestDataBlockEncoders {
     this.includesMemstoreTS = includesMemstoreTS;
     this.includesTags = includesTag;
   }
-  
+
   private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
       DataBlockEncoding encoding) {
     DataBlockEncoder encoder = encoding.getEncoder();
@@ -97,17 +98,15 @@ public class TestDataBlockEncoders {
                         .withIncludesTags(includesTags)
                         .withCompression(algo).build();
     if (encoder != null) {
-      return encoder.newDataBlockEncodingContext(encoding,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
+      return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
     } else {
-      return new HFileBlockDefaultEncodingContext(encoding,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
+      return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
     }
   }
 
   /**
    * Test data block encoding of empty KeyValue.
-   * 
+   *
    * @throws IOException
    *           On test failure.
    */
@@ -134,7 +133,7 @@ public class TestDataBlockEncoders {
 
   /**
    * Test KeyValues with negative timestamp.
-   * 
+   *
    * @throws IOException
    *           On test failure.
    */
@@ -179,7 +178,7 @@ public class TestDataBlockEncoders {
     List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);
 
     // create all seekers
-    List<DataBlockEncoder.EncodedSeeker> encodedSeekers = 
+    List<DataBlockEncoder.EncodedSeeker> encodedSeekers =
         new ArrayList<DataBlockEncoder.EncodedSeeker>();
     for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
       LOG.info("Encoding: " + encoding);
@@ -237,7 +236,7 @@ public class TestDataBlockEncoders {
       HFileBlockEncodingContext encodingContext) throws IOException {
     DataBlockEncoder encoder = encoding.getEncoder();
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
+    baos.write(HFILEBLOCK_DUMMY_HEADER);
     DataOutputStream dos = new DataOutputStream(baos);
     encoder.startBlockEncoding(encodingContext, dos);
     for (KeyValue kv : kvs) {
@@ -332,7 +331,7 @@ public class TestDataBlockEncoders {
       }
     }
   }
-  
+
   private void checkSeekingConsistency(List<DataBlockEncoder.EncodedSeeker> encodedSeekers,
       boolean seekBefore, KeyValue keyValue) {
     ByteBuffer expectedKeyValue = null;
@@ -378,10 +377,10 @@ public class TestDataBlockEncoders {
         continue;
       }
       HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+          HFILEBLOCK_DUMMY_HEADER, fileContext);
 
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
+      baos.write(HFILEBLOCK_DUMMY_HEADER);
       DataOutputStream dos = new DataOutputStream(baos);
       encoder.startBlockEncoding(encodingContext, dos);
       for (KeyValue kv : kvList) {
@@ -395,7 +394,7 @@ public class TestDataBlockEncoders {
       testAlgorithm(encodedData, unencodedDataBuf, encoder);
     }
   }
-  
+
   @Test
   public void testZeroByte() throws IOException {
     List<KeyValue> kvList = new ArrayList<KeyValue>();

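Both encoder tests now declare their own HFILEBLOCK_DUMMY_HEADER, a zero-filled array
sized to HConstants.HFILEBLOCK_HEADER_SIZE, rather than referencing a constant on
HConstants. The dummy header just reserves room at the front of the output stream so
the real block header can be filled in once the encoded size is known. A sketch of
that reserve-then-fill pattern (the 33-byte size and the payload are stand-ins):

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  public class DummyHeaderSketch {
    static final int HEADER_SIZE = 33; // stand-in for HConstants.HFILEBLOCK_HEADER_SIZE
    static final byte[] DUMMY_HEADER = new byte[HEADER_SIZE]; // all zeros

    static byte[] encodeBlock(byte[] payload) throws IOException {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      baos.write(DUMMY_HEADER);  // placeholder header, overwritten later
      DataOutputStream dos = new DataOutputStream(baos);
      dos.write(payload);        // encoded KeyValues would stream in here
      dos.flush();
      byte[] block = baos.toByteArray();
      // A real writer now rewrites block[0 .. HEADER_SIZE) with the actual
      // header fields (block magic, on-disk/uncompressed sizes, and so on).
      return block;
    }
  }
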
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
index ec2befe..914a37b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
@@ -37,6 +37,7 @@ import org.junit.experimental.categories.Category;
 
 @Category(SmallTests.class)
 public class TestSeekToBlockWithEncoders {
+  static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
 
   /**
    * Test seeking while file is encoded.
@@ -261,8 +262,7 @@ public class TestSeekToBlockWithEncoders {
       HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
           .withIncludesMvcc(false).withIncludesTags(false)
           .withCompression(Compression.Algorithm.NONE).build();
-      HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
+      HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
       ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
           encodingContext);
       DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,

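The same zero-filled dummy header replaces the HConstants reference in this test as
well; the sketch after the TestDataBlockEncoders diff above covers the pattern.
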
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index 64f46c8..16583d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -98,7 +98,7 @@ public class TestForceCacheImportantBlocks {
   public void setup() {
     // Make sure we make a new one each time.
     CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
-    HFile.dataBlockReadCnt.set(0);
+    HFile.DATABLOCK_READ_COUNT.set(0);
   }
 
   @Test
@@ -115,12 +115,12 @@ public class TestForceCacheImportantBlocks {
     CacheStats stats = cache.getStats();
     writeTestData(region);
     assertEquals(0, stats.getHitCount());
-    assertEquals(0, HFile.dataBlockReadCnt.get());
+    assertEquals(0, HFile.DATABLOCK_READ_COUNT.get());
     // Do a single get, take count of caches.  If we are NOT caching DATA blocks, the miss
     // count should go up.  Otherwise, all should be cached and the miss count should not rise.
     region.get(new Get(Bytes.toBytes("row" + 0)));
     assertTrue(stats.getHitCount() > 0);
-    assertTrue(HFile.dataBlockReadCnt.get() > 0);
+    assertTrue(HFile.DATABLOCK_READ_COUNT.get() > 0);
     long missCount = stats.getMissCount();
     region.get(new Get(Bytes.toBytes("row" + 0)));
     if (this.cfCacheEnabled) assertEquals(missCount, stats.getMissCount());

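The assertions spell out the contract under test: after the first Get warms the cache,
a second Get of the same row must add no misses when DATA blocks are cached for the
family, and must add at least one when they are not. The hunk ends at the cached
branch; a hedged sketch of the full conditional (the else branch is inferred, and the
Stats interface is a stand-in for the real CacheStats):

  import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertTrue;

  public class MissCountContractSketch {
    interface Stats { long getMissCount(); }  // stand-in for CacheStats

    static void assertSecondReadBehavior(Stats stats, Runnable secondGet,
        boolean cfCacheEnabled) {
      long missCount = stats.getMissCount();
      secondGet.run();  // re-read the same row
      if (cfCacheEnabled) {
        // DATA blocks were cached by the first read: no new misses.
        assertEquals(missCount, stats.getMissCount());
      } else {
        // DATA caching is off for this family: the block must be re-read.
        assertTrue(stats.getMissCount() > missCount);
      }
    }
  }
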
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 9810448..5e24ee2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -545,7 +545,7 @@ public class TestHFileBlock {
           for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
             if (!pread) {
               assertEquals(is.getPos(), curOffset + (i == 0 ? 0 :
-                  HConstants.HFILEBLOCK_HEADER_SIZE));
+                HConstants.HFILEBLOCK_HEADER_SIZE));
             }
 
             assertEquals(expectedOffsets.get(i).longValue(), curOffset);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
index 52596f4..ec11f18 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
@@ -344,8 +344,7 @@ public class TestHFileBlockCompatibility {
     // These constants are as they were in minorVersion 0.
     private static final int HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
     private static final boolean DONT_FILL_HEADER = HFileBlock.DONT_FILL_HEADER;
-    private static final byte[] DUMMY_HEADER =
-      HFileBlock.DUMMY_HEADER_NO_CHECKSUM;
+    private static final byte[] DUMMY_HEADER = HFileBlock.DUMMY_HEADER_NO_CHECKSUM;
 
     private enum State {
       INIT,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b65f200/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index a002f28..7bad8f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -203,7 +203,7 @@ public class TestBlocksRead extends HBaseTestCase {
   }
 
   private static long getBlkAccessCount(byte[] cf) {
-      return HFile.dataBlockReadCnt.get();
+      return HFile.DATABLOCK_READ_COUNT.get();
   }
 
   private static long getBlkCount() {

