hbase-commits mailing list archives

From: st...@apache.org
Subject: [1/4] hbase git commit: HBASE-Squash HFileReaderV3 together with HFileReaderV2 and AbstractHFileReader; ditto for Scanners and BlockReader, etc.
Date: Fri, 03 Apr 2015 21:25:25 GMT
Repository: hbase
Updated Branches:
  refs/heads/branch-1 dd4f8a5e5 -> 691efc60f


http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
new file mode 100644
index 0000000..e2b6efd
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -0,0 +1,641 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
+import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.BloomFilterWriter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Common functionality needed by all versions of {@link HFile} writers.
+ */
+@InterfaceAudience.Private
+public class HFileWriterImpl implements HFile.Writer {
+  private static final Log LOG = LogFactory.getLog(HFileWriterImpl.class);
+
+  /** The Cell previously appended. Becomes the last cell in the file.*/
+  protected Cell lastCell = null;
+
+  /** FileSystem stream to write into. */
+  protected FSDataOutputStream outputStream;
+
+  /** True if we opened the <code>outputStream</code> (and so will close it). */
+  protected final boolean closeOutputStream;
+
+  /** A "file info" block: a key-value map of file-wide metadata. */
+  protected FileInfo fileInfo = new HFile.FileInfo();
+
+  /** Total # of key/value entries, i.e. how many times add() was called. */
+  protected long entryCount = 0;
+
+  /** Used for calculating the average key length. */
+  protected long totalKeyLength = 0;
+
+  /** Used for calculating the average value length. */
+  protected long totalValueLength = 0;
+
+  /** Total uncompressed bytes, maybe calculate a compression ratio later. */
+  protected long totalUncompressedBytes = 0;
+
+  /** Key comparator. Used to ensure we write in order. */
+  protected final KVComparator comparator;
+
+  /** Meta block names. */
+  protected List<byte[]> metaNames = new ArrayList<byte[]>();
+
+  /** {@link Writable}s representing meta block data. */
+  protected List<Writable> metaData = new ArrayList<Writable>();
+
+  /**
+   * First cell in a block.
+   * This reference should be short-lived since we write hfiles in a burst.
+   */
+  protected Cell firstCellInBlock = null;
+
+
+  /** May be null if we were passed a stream. */
+  protected final Path path;
+
+  /** Cache configuration for caching data on write. */
+  protected final CacheConfig cacheConf;
+
+  /**
+   * Name for this object used when logging or in toString. Is either
+   * the result of a toString on stream or else name of passed file Path.
+   */
+  protected final String name;
+
+  /**
+   * The data block encoding which will be used.
+   * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding.
+   */
+  protected final HFileDataBlockEncoder blockEncoder;
+
+  protected final HFileContext hFileContext;
+
+  private int maxTagsLength = 0;
+
+  /** KeyValue version in FileInfo */
+  public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION");
+
+  /** Version for KeyValue which includes memstore timestamp */
+  public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1;
+
+  /** Inline block writers for multi-level block index and compound Blooms. */
+  private List<InlineBlockWriter> inlineBlockWriters = new ArrayList<InlineBlockWriter>();
+
+  /** block writer */
+  protected HFileBlock.Writer fsBlockWriter;
+
+  private HFileBlockIndex.BlockIndexWriter dataBlockIndexWriter;
+  private HFileBlockIndex.BlockIndexWriter metaBlockIndexWriter;
+
+  /** The offset of the first data block or -1 if the file is empty. */
+  private long firstDataBlockOffset = -1;
+
+  /** The offset of the last data block or 0 if the file is empty. */
+  protected long lastDataBlockOffset;
+
+  /**
+   * The last(stop) Cell of the previous data block.
+   * This reference should be short-lived since we write hfiles in a burst.
+   */
+  private Cell lastCellOfPreviousBlock = null;
+
+  /** Additional data items to be written to the "load-on-open" section. */
+  private List<BlockWritable> additionalLoadOnOpenData = new ArrayList<BlockWritable>();
+
+  protected long maxMemstoreTS = 0;
+
+  public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path,
+      FSDataOutputStream outputStream,
+      KVComparator comparator, HFileContext fileContext) {
+    this.outputStream = outputStream;
+    this.path = path;
+    this.name = path != null ? path.getName() : outputStream.toString();
+    this.hFileContext = fileContext;
+    DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
+    if (encoding != DataBlockEncoding.NONE) {
+      this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
+    } else {
+      this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
+    }
+    this.comparator = comparator != null ? comparator
+        : KeyValue.COMPARATOR;
+
+    closeOutputStream = path != null;
+    this.cacheConf = cacheConf;
+    finishInit(conf);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Writer" + (path != null ? " for " + path : "") +
+        " initialized with cacheConf: " + cacheConf +
+        " comparator: " + comparator.getClass().getSimpleName() +
+        " fileContext: " + fileContext);
+    }
+  }
+
+  /**
+   * Add to the file info. All added key/value pairs can be obtained using
+   * {@link HFile.Reader#loadFileInfo()}.
+   *
+   * @param k Key
+   * @param v Value
+   * @throws IOException in case the key or the value are invalid
+   */
+  @Override
+  public void appendFileInfo(final byte[] k, final byte[] v)
+      throws IOException {
+    fileInfo.append(k, v, true);
+  }
+
+  /**
+   * Sets the file info offset in the trailer, finishes up populating fields in
+   * the file info, and writes the file info into the given data output. The
+   * reason the data output is not always {@link #outputStream} is that we store
+   * file info as a block in version 2.
+   *
+   * @param trailer fixed file trailer
+   * @param out the data output to write the file info to
+   * @throws IOException
+   */
+  protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out)
+  throws IOException {
+    trailer.setFileInfoOffset(outputStream.getPos());
+    finishFileInfo();
+    fileInfo.write(out);
+  }
+
+  /**
+   * Checks that the given Cell's key does not violate the key order.
+   *
+   * @param cell Cell whose key to check.
+   * @return true if the key is a duplicate of the previously appended key
+   * @throws IOException if the key or the key order is wrong
+   */
+  protected boolean checkKey(final Cell cell) throws IOException {
+    boolean isDuplicateKey = false;
+
+    if (cell == null) {
+      throw new IOException("Key cannot be null or empty");
+    }
+    if (lastCell != null) {
+      int keyComp = comparator.compareOnlyKeyPortion(lastCell, cell);
+
+      if (keyComp > 0) {
+        throw new IOException("Added a key not lexically larger than"
+            + " previous. Current cell = " + cell + ", lastCell = " + lastCell);
+      } else if (keyComp == 0) {
+        isDuplicateKey = true;
+      }
+    }
+    return isDuplicateKey;
+  }
+
+  /** Checks the given value for validity. */
+  protected void checkValue(final byte[] value, final int offset,
+      final int length) throws IOException {
+    if (value == null) {
+      throw new IOException("Value cannot be null");
+    }
+  }
+
+  /**
+   * @return Path or null if we were passed a stream rather than a Path.
+   */
+  @Override
+  public Path getPath() {
+    return path;
+  }
+
+  @Override
+  public String toString() {
+    return "writer=" + (path != null ? path.toString() : null) + ", name="
+        + name + ", compression=" + hFileContext.getCompression().getName();
+  }
+
+  public static Compression.Algorithm compressionByName(String algoName) {
+    if (algoName == null)
+      return HFile.DEFAULT_COMPRESSION_ALGORITHM;
+    return Compression.getCompressionAlgorithmByName(algoName);
+  }
+
+  /** A helper method to create HFile output streams in constructors */
+  protected static FSDataOutputStream createOutputStream(Configuration conf,
+      FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException {
+    FsPermission perms = FSUtils.getFilePermissions(fs, conf,
+        HConstants.DATA_FILE_UMASK_KEY);
+    return FSUtils.create(fs, path, perms, favoredNodes);
+  }
+
+  /** Additional initialization steps */
+  protected void finishInit(final Configuration conf) {
+    if (fsBlockWriter != null) {
+      throw new IllegalStateException("finishInit called twice");
+    }
+
+    fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);
+
+    // Data block index writer
+    boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
+    dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
+        cacheIndexesOnWrite ? cacheConf : null,
+        cacheIndexesOnWrite ? name : null);
+    dataBlockIndexWriter.setMaxChunkSize(
+        HFileBlockIndex.getMaxChunkSize(conf));
+    inlineBlockWriters.add(dataBlockIndexWriter);
+
+    // Meta data block index writer
+    metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
+    if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
+  }
+
+  /**
+   * At a block boundary, writes all the inline blocks and opens a new block.
+   *
+   * @throws IOException
+   */
+  protected void checkBlockBoundary() throws IOException {
+    if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) return;
+    finishBlock();
+    writeInlineBlocks(false);
+    newBlock();
+  }
+
+  /** Clean up the current data block */
+  private void finishBlock() throws IOException {
+    if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) return;
+
+    // Update the first data block offset for scanning.
+    if (firstDataBlockOffset == -1) {
+      firstDataBlockOffset = outputStream.getPos();
+    }
+    // Update the last data block offset
+    lastDataBlockOffset = outputStream.getPos();
+    fsBlockWriter.writeHeaderAndData(outputStream);
+    int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader();
+    Cell indexEntry =
+      CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock);
+    dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry),
+      lastDataBlockOffset, onDiskSize);
+    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+    if (cacheConf.shouldCacheDataOnWrite()) {
+      doCacheOnWrite(lastDataBlockOffset);
+    }
+  }
+
+  /** Gives inline block writers an opportunity to contribute blocks. */
+  private void writeInlineBlocks(boolean closing) throws IOException {
+    for (InlineBlockWriter ibw : inlineBlockWriters) {
+      while (ibw.shouldWriteBlock(closing)) {
+        long offset = outputStream.getPos();
+        boolean cacheThisBlock = ibw.getCacheOnWrite();
+        ibw.writeInlineBlock(fsBlockWriter.startWriting(
+            ibw.getInlineBlockType()));
+        fsBlockWriter.writeHeaderAndData(outputStream);
+        ibw.blockWritten(offset, fsBlockWriter.getOnDiskSizeWithHeader(),
+            fsBlockWriter.getUncompressedSizeWithoutHeader());
+        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+
+        if (cacheThisBlock) {
+          doCacheOnWrite(offset);
+        }
+      }
+    }
+  }
+
+  /**
+   * Caches the last written HFile block.
+   * @param offset the offset of the block we want to cache. Used to determine
+   *          the cache key.
+   */
+  private void doCacheOnWrite(long offset) {
+    HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching(cacheConf);
+    cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(name, offset), cacheFormatBlock);
+  }
+
+  /**
+   * Ready a new block for writing.
+   *
+   * @throws IOException
+   */
+  protected void newBlock() throws IOException {
+    // This is where the next block begins.
+    fsBlockWriter.startWriting(BlockType.DATA);
+    firstCellInBlock = null;
+    if (lastCell != null) {
+      lastCellOfPreviousBlock = lastCell;
+    }
+  }
+
+  /**
+   * Add a meta block to the end of the file. Call before close(). Metadata
+   * blocks are expensive. Fill one with a bunch of serialized data rather than
+   * do a metadata block per metadata instance. If metadata is small, consider
+   * adding to file info using {@link #appendFileInfo(byte[], byte[])}
+   *
+   * @param metaBlockName
+   *          name of the block
+   * @param content
+   *          will call readFields to get data later (DO NOT REUSE)
+   */
+  @Override
+  public void appendMetaBlock(String metaBlockName, Writable content) {
+    byte[] key = Bytes.toBytes(metaBlockName);
+    int i;
+    for (i = 0; i < metaNames.size(); ++i) {
+      // stop when the current key is greater than our own
+      byte[] cur = metaNames.get(i);
+      if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0,
+          key.length) > 0) {
+        break;
+      }
+    }
+    metaNames.add(i, key);
+    metaData.add(i, content);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (outputStream == null) {
+      return;
+    }
+    // Save data block encoder metadata in the file info.
+    blockEncoder.saveMetadata(this);
+    // Write out the end of the data blocks, then write the meta data blocks,
+    // followed by fileinfo, data block index and meta block index.
+
+    finishBlock();
+    writeInlineBlocks(true);
+
+    FixedFileTrailer trailer = new FixedFileTrailer(getMajorVersion(), getMinorVersion());
+
+    // Write out the metadata blocks if any.
+    if (!metaNames.isEmpty()) {
+      for (int i = 0; i < metaNames.size(); ++i) {
+        // store the beginning offset
+        long offset = outputStream.getPos();
+        // write the metadata content
+        DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META);
+        metaData.get(i).write(dos);
+
+        fsBlockWriter.writeHeaderAndData(outputStream);
+        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+
+        // Add the new meta block to the meta index.
+        metaBlockIndexWriter.addEntry(metaNames.get(i), offset,
+            fsBlockWriter.getOnDiskSizeWithHeader());
+      }
+    }
+
+    // Load-on-open section.
+
+    // Data block index.
+    //
+    // In version 2, this section of the file starts with the root level data
+    // block index. We call a function that writes intermediate-level blocks
+    // first, then root level, and returns the offset of the root level block
+    // index.
+
+    long rootIndexOffset = dataBlockIndexWriter.writeIndexBlocks(outputStream);
+    trailer.setLoadOnOpenOffset(rootIndexOffset);
+
+    // Meta block index.
+    metaBlockIndexWriter.writeSingleLevelIndex(fsBlockWriter.startWriting(
+        BlockType.ROOT_INDEX), "meta");
+    fsBlockWriter.writeHeaderAndData(outputStream);
+    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+
+    if (this.hFileContext.isIncludesMvcc()) {
+      appendFileInfo(MAX_MEMSTORE_TS_KEY, Bytes.toBytes(maxMemstoreTS));
+      appendFileInfo(KEY_VALUE_VERSION, Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE));
+    }
+
+    // File info
+    writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO));
+    fsBlockWriter.writeHeaderAndData(outputStream);
+    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+
+    // Load-on-open data supplied by higher levels, e.g. Bloom filters.
+    for (BlockWritable w : additionalLoadOnOpenData){
+      fsBlockWriter.writeBlock(w, outputStream);
+      totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
+    }
+
+    // Now finish off the trailer.
+    trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels());
+    trailer.setUncompressedDataIndexSize(
+        dataBlockIndexWriter.getTotalUncompressedSize());
+    trailer.setFirstDataBlockOffset(firstDataBlockOffset);
+    trailer.setLastDataBlockOffset(lastDataBlockOffset);
+    trailer.setComparatorClass(comparator.getClass());
+    trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries());
+
+
+    finishClose(trailer);
+
+    fsBlockWriter.release();
+  }
+
+  @Override
+  public void addInlineBlockWriter(InlineBlockWriter ibw) {
+    inlineBlockWriters.add(ibw);
+  }
+
+  @Override
+  public void addGeneralBloomFilter(final BloomFilterWriter bfw) {
+    this.addBloomFilter(bfw, BlockType.GENERAL_BLOOM_META);
+  }
+
+  @Override
+  public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) {
+    this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META);
+  }
+
+  private void addBloomFilter(final BloomFilterWriter bfw,
+      final BlockType blockType) {
+    if (bfw.getKeyCount() <= 0)
+      return;
+
+    if (blockType != BlockType.GENERAL_BLOOM_META &&
+        blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
+      throw new RuntimeException("Block Type: " + blockType.toString() +
+          " is not supported");
+    }
+    additionalLoadOnOpenData.add(new BlockWritable() {
+      @Override
+      public BlockType getBlockType() {
+        return blockType;
+      }
+
+      @Override
+      public void writeToBlock(DataOutput out) throws IOException {
+        bfw.getMetaWriter().write(out);
+        Writable dataWriter = bfw.getDataWriter();
+        if (dataWriter != null)
+          dataWriter.write(out);
+      }
+    });
+  }
+
+  @Override
+  public HFileContext getFileContext() {
+    return hFileContext;
+  }
+
+  /**
+   * Add key/value to file. Keys must be added in an order that agrees with the
+   * Comparator passed on construction.
+   * 
+   * @param cell
+   *          Cell to add. Cannot be empty nor null.
+   * @throws IOException
+   */
+  @Override
+  public void append(final Cell cell) throws IOException {
+    byte[] value = cell.getValueArray();
+    int voffset = cell.getValueOffset();
+    int vlength = cell.getValueLength();
+    // checkKey uses comparator to check we are writing in order.
+    boolean dupKey = checkKey(cell);
+    checkValue(value, voffset, vlength);
+    if (!dupKey) {
+      checkBlockBoundary();
+    }
+
+    if (!fsBlockWriter.isWriting()) {
+      newBlock();
+    }
+
+    fsBlockWriter.write(cell);
+
+    totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell);
+    totalValueLength += vlength;
+
+    // Are we the first key in this block?
+    if (firstCellInBlock == null) {
+      // If cell is big, block will be closed and this firstCellInBlock reference will only last
+      // a short while.
+      firstCellInBlock = cell;
+    }
+
+    // TODO: What if cell is 10MB and we write infrequently? We hold on to the cell here indefinitely?
+    lastCell = cell;
+    entryCount++;
+    this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId());
+    int tagsLength = cell.getTagsLength();
+    if (tagsLength > this.maxTagsLength) {
+      this.maxTagsLength = tagsLength;
+    }
+  }
+
+  protected void finishFileInfo() throws IOException {
+    if (lastCell != null) {
+      // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean
+      // byte buffer. Won't take a tuple.
+      byte [] lastKey = CellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell);
+      fileInfo.append(FileInfo.LASTKEY, lastKey, false);
+    }
+
+    // Average key length.
+    int avgKeyLen =
+        entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount);
+    fileInfo.append(FileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false);
+    fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
+      false);
+
+    // Average value length.
+    int avgValueLen =
+        entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
+    fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+    if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) {
+      // In case of Prefix Tree encoding, we always write tags information into HFiles even if
+      // none of the KVs has any tags.
+      fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
+    } else if (hFileContext.isIncludesTags()) {
+      // Tags are being written to this file, so record MAX_TAGS_LEN; when tags are not
+      // written, MAX_TAGS_LEN is excluded from the FileInfo.
+      fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
+      boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE)
+        && hFileContext.isCompressTags();
+      fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false);
+    }
+  }
+
+  protected int getMajorVersion() {
+    return 3;
+  }
+
+  protected int getMinorVersion() {
+    return HFileReaderImpl.MAX_MINOR_VERSION;
+  }
+
+  protected void finishClose(FixedFileTrailer trailer) throws IOException {
+    // Write out encryption metadata before finalizing if we have a valid crypto context
+    Encryption.Context cryptoContext = hFileContext.getEncryptionContext();
+    if (cryptoContext != Encryption.Context.NONE) {
+      // Wrap the context's key and write it as the encryption metadata, the wrapper includes
+      // all information needed for decryption
+      trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(),
+        cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,
+          User.getCurrent().getShortName()),
+        cryptoContext.getKey()));
+    }
+    // Now we can finish the close
+    trailer.setMetaIndexCount(metaNames.size());
+    trailer.setTotalUncompressedBytes(totalUncompressedBytes + trailer.getTrailerSize());
+    trailer.setEntryCount(entryCount);
+    trailer.setCompressionCodec(hFileContext.getCompression());
+
+    trailer.serialize(outputStream);
+
+    if (closeOutputStream) {
+      outputStream.close();
+      outputStream = null;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
deleted file mode 100644
index 28c4655..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
-import org.apache.hadoop.hbase.util.BloomFilterWriter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Writable;
-
-/**
- * Writes HFile format version 2.
- */
-@InterfaceAudience.Private
-public class HFileWriterV2 extends AbstractHFileWriter {
-  static final Log LOG = LogFactory.getLog(HFileWriterV2.class);
-
-  /** Max memstore (mvcc) timestamp in FileInfo */
-  public static final byte [] MAX_MEMSTORE_TS_KEY =
-      Bytes.toBytes("MAX_MEMSTORE_TS_KEY");
-
-  /** KeyValue version in FileInfo */
-  public static final byte [] KEY_VALUE_VERSION =
-      Bytes.toBytes("KEY_VALUE_VERSION");
-
-  /** Version for KeyValue which includes memstore timestamp */
-  public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1;
-
-  /** Inline block writers for multi-level block index and compound Blooms. */
-  private List<InlineBlockWriter> inlineBlockWriters =
-      new ArrayList<InlineBlockWriter>();
-
-  /** Unified version 2 block writer */
-  protected HFileBlock.Writer fsBlockWriter;
-
-  private HFileBlockIndex.BlockIndexWriter dataBlockIndexWriter;
-  private HFileBlockIndex.BlockIndexWriter metaBlockIndexWriter;
-
-  /** The offset of the first data block or -1 if the file is empty. */
-  private long firstDataBlockOffset = -1;
-
-  /** The offset of the last data block or 0 if the file is empty. */
-  protected long lastDataBlockOffset;
-
-  /**
-   * The last(stop) Cell of the previous data block.
-   * This reference should be short-lived since we write hfiles in a burst.
-   */
-  private Cell lastCellOfPreviousBlock = null;
-
-  /** Additional data items to be written to the "load-on-open" section. */
-  private List<BlockWritable> additionalLoadOnOpenData =
-    new ArrayList<BlockWritable>();
-
-  protected long maxMemstoreTS = 0;
-
-  static class WriterFactoryV2 extends HFile.WriterFactory {
-    WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
-      super(conf, cacheConf);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, 
-        FSDataOutputStream ostream,
-        KVComparator comparator, HFileContext context) throws IOException {
-      context.setIncludesTags(false);// HFile V2 does not deal with tags at all!
-      return new HFileWriterV2(conf, cacheConf, fs, path, ostream, 
-          comparator, context);
-      }
-    }
-
-  /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path, FSDataOutputStream ostream, 
-      final KVComparator comparator, final HFileContext context) throws IOException {
-    super(cacheConf,
-        ostream == null ? createOutputStream(conf, fs, path, null) : ostream,
-        path, comparator, context);
-    finishInit(conf);
-  }
-
-  /** Additional initialization steps */
-  protected void finishInit(final Configuration conf) {
-    if (fsBlockWriter != null)
-      throw new IllegalStateException("finishInit called twice");
-
-    fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext);
-
-    // Data block index writer
-    boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
-    dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter,
-        cacheIndexesOnWrite ? cacheConf : null,
-        cacheIndexesOnWrite ? name : null);
-    dataBlockIndexWriter.setMaxChunkSize(
-        HFileBlockIndex.getMaxChunkSize(conf));
-    inlineBlockWriters.add(dataBlockIndexWriter);
-
-    // Meta data block index writer
-    metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
-    if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
-  }
-
-  /**
-   * At a block boundary, write all the inline blocks and opens new block.
-   *
-   * @throws IOException
-   */
-  protected void checkBlockBoundary() throws IOException {
-    if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize())
-      return;
-
-    finishBlock();
-    writeInlineBlocks(false);
-    newBlock();
-  }
-
-  /** Clean up the current data block */
-  private void finishBlock() throws IOException {
-    if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0)
-      return;
-
-    // Update the first data block offset for scanning.
-    if (firstDataBlockOffset == -1) {
-      firstDataBlockOffset = outputStream.getPos();
-    }
-    // Update the last data block offset
-    lastDataBlockOffset = outputStream.getPos();
-    fsBlockWriter.writeHeaderAndData(outputStream);
-    int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader();
-
-    Cell indexEntry =
-      CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock);
-    dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry),
-      lastDataBlockOffset, onDiskSize);
-    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-    if (cacheConf.shouldCacheDataOnWrite()) {
-      doCacheOnWrite(lastDataBlockOffset);
-    }
-  }
-
-  /** Gives inline block writers an opportunity to contribute blocks. */
-  private void writeInlineBlocks(boolean closing) throws IOException {
-    for (InlineBlockWriter ibw : inlineBlockWriters) {
-      while (ibw.shouldWriteBlock(closing)) {
-        long offset = outputStream.getPos();
-        boolean cacheThisBlock = ibw.getCacheOnWrite();
-        ibw.writeInlineBlock(fsBlockWriter.startWriting(
-            ibw.getInlineBlockType()));
-        fsBlockWriter.writeHeaderAndData(outputStream);
-        ibw.blockWritten(offset, fsBlockWriter.getOnDiskSizeWithHeader(),
-            fsBlockWriter.getUncompressedSizeWithoutHeader());
-        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-
-        if (cacheThisBlock) {
-          doCacheOnWrite(offset);
-        }
-      }
-    }
-  }
-
-  /**
-   * Caches the last written HFile block.
-   * @param offset the offset of the block we want to cache. Used to determine
-   *          the cache key.
-   */
-  private void doCacheOnWrite(long offset) {
-    HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching(cacheConf);
-    cacheConf.getBlockCache().cacheBlock(
-        new BlockCacheKey(name, offset), cacheFormatBlock);
-  }
-
-  /**
-   * Ready a new block for writing.
-   *
-   * @throws IOException
-   */
-  protected void newBlock() throws IOException {
-    // This is where the next block begins.
-    fsBlockWriter.startWriting(BlockType.DATA);
-    firstCellInBlock = null;
-    if (lastCell != null) {
-      lastCellOfPreviousBlock = lastCell;
-    }
-  }
-
-  /**
-   * Add a meta block to the end of the file. Call before close(). Metadata
-   * blocks are expensive. Fill one with a bunch of serialized data rather than
-   * do a metadata block per metadata instance. If metadata is small, consider
-   * adding to file info using {@link #appendFileInfo(byte[], byte[])}
-   *
-   * @param metaBlockName
-   *          name of the block
-   * @param content
-   *          will call readFields to get data later (DO NOT REUSE)
-   */
-  @Override
-  public void appendMetaBlock(String metaBlockName, Writable content) {
-    byte[] key = Bytes.toBytes(metaBlockName);
-    int i;
-    for (i = 0; i < metaNames.size(); ++i) {
-      // stop when the current key is greater than our own
-      byte[] cur = metaNames.get(i);
-      if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0,
-          key.length) > 0) {
-        break;
-      }
-    }
-    metaNames.add(i, key);
-    metaData.add(i, content);
-  }
-
-  /**
-   * Add key/value to file. Keys must be added in an order that agrees with the
-   * Comparator passed on construction.
-   *
-   * @param cell Cell to add. Cannot be empty nor null.
-   * @throws IOException
-   */
-  @Override
-  public void append(final Cell cell) throws IOException {
-    byte[] value = cell.getValueArray();
-    int voffset = cell.getValueOffset();
-    int vlength = cell.getValueLength();
-    // checkKey uses comparator to check we are writing in order.
-    boolean dupKey = checkKey(cell);
-    checkValue(value, voffset, vlength);
-    if (!dupKey) {
-      checkBlockBoundary();
-    }
-
-    if (!fsBlockWriter.isWriting()) {
-      newBlock();
-    }
-
-    fsBlockWriter.write(cell);
-
-    totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell);
-    totalValueLength += vlength;
-
-    // Are we the first key in this block?
-    if (firstCellInBlock == null) {
-      // If cell is big, block will be closed and this firstCellInBlock reference will only last
-      // a short while.
-      firstCellInBlock = cell;
-    }
-
-    // TODO: What if cell is 10MB and we write infrequently?  We'll hold on to the cell here
-    // indefinetly?
-    lastCell = cell;
-    entryCount++;
-    this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId());
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (outputStream == null) {
-      return;
-    }
-    // Save data block encoder metadata in the file info.
-    blockEncoder.saveMetadata(this);
-    // Write out the end of the data blocks, then write meta data blocks.
-    // followed by fileinfo, data block index and meta block index.
-
-    finishBlock();
-    writeInlineBlocks(true);
-
-    FixedFileTrailer trailer = new FixedFileTrailer(getMajorVersion(), getMinorVersion());
-
-    // Write out the metadata blocks if any.
-    if (!metaNames.isEmpty()) {
-      for (int i = 0; i < metaNames.size(); ++i) {
-        // store the beginning offset
-        long offset = outputStream.getPos();
-        // write the metadata content
-        DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META);
-        metaData.get(i).write(dos);
-
-        fsBlockWriter.writeHeaderAndData(outputStream);
-        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-
-        // Add the new meta block to the meta index.
-        metaBlockIndexWriter.addEntry(metaNames.get(i), offset,
-            fsBlockWriter.getOnDiskSizeWithHeader());
-      }
-    }
-
-    // Load-on-open section.
-
-    // Data block index.
-    //
-    // In version 2, this section of the file starts with the root level data
-    // block index. We call a function that writes intermediate-level blocks
-    // first, then root level, and returns the offset of the root level block
-    // index.
-
-    long rootIndexOffset = dataBlockIndexWriter.writeIndexBlocks(outputStream);
-    trailer.setLoadOnOpenOffset(rootIndexOffset);
-
-    // Meta block index.
-    metaBlockIndexWriter.writeSingleLevelIndex(fsBlockWriter.startWriting(
-        BlockType.ROOT_INDEX), "meta");
-    fsBlockWriter.writeHeaderAndData(outputStream);
-    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-
-    if (this.hFileContext.isIncludesMvcc()) {
-      appendFileInfo(MAX_MEMSTORE_TS_KEY, Bytes.toBytes(maxMemstoreTS));
-      appendFileInfo(KEY_VALUE_VERSION, Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE));
-    }
-
-    // File info
-    writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO));
-    fsBlockWriter.writeHeaderAndData(outputStream);
-    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-
-    // Load-on-open data supplied by higher levels, e.g. Bloom filters.
-    for (BlockWritable w : additionalLoadOnOpenData){
-      fsBlockWriter.writeBlock(w, outputStream);
-      totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
-    }
-
-    // Now finish off the trailer.
-    trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels());
-    trailer.setUncompressedDataIndexSize(
-        dataBlockIndexWriter.getTotalUncompressedSize());
-    trailer.setFirstDataBlockOffset(firstDataBlockOffset);
-    trailer.setLastDataBlockOffset(lastDataBlockOffset);
-    trailer.setComparatorClass(comparator.getClass());
-    trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries());
-
-
-    finishClose(trailer);
-
-    fsBlockWriter.release();
-  }
-
-  @Override
-  public void addInlineBlockWriter(InlineBlockWriter ibw) {
-    inlineBlockWriters.add(ibw);
-  }
-
-  @Override
-  public void addGeneralBloomFilter(final BloomFilterWriter bfw) {
-    this.addBloomFilter(bfw, BlockType.GENERAL_BLOOM_META);
-  }
-
-  @Override
-  public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) {
-    this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META);
-  }
-
-  private void addBloomFilter(final BloomFilterWriter bfw,
-      final BlockType blockType) {
-    if (bfw.getKeyCount() <= 0)
-      return;
-
-    if (blockType != BlockType.GENERAL_BLOOM_META &&
-        blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
-      throw new RuntimeException("Block Type: " + blockType.toString() +
-          "is not supported");
-    }
-    additionalLoadOnOpenData.add(new BlockWritable() {
-      @Override
-      public BlockType getBlockType() {
-        return blockType;
-      }
-
-      @Override
-      public void writeToBlock(DataOutput out) throws IOException {
-        bfw.getMetaWriter().write(out);
-        Writable dataWriter = bfw.getDataWriter();
-        if (dataWriter != null)
-          dataWriter.write(out);
-      }
-    });
-  }
-
-  protected int getMajorVersion() {
-    return 2;
-  }
-
-  protected int getMinorVersion() {
-    return HFileReaderV2.MAX_MINOR_VERSION;
-  }
-
-  @Override
-  public HFileContext getFileContext() {
-    return hFileContext;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
deleted file mode 100644
index 086395c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.io.crypto.Encryption;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.security.EncryptionUtil;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * {@link HFile} writer for version 3.
- */
-@InterfaceAudience.Private
-public class HFileWriterV3 extends HFileWriterV2 {
-
-  private static final Log LOG = LogFactory.getLog(HFileWriterV3.class);
-
-  private int maxTagsLength = 0;
-
-  static class WriterFactoryV3 extends HFile.WriterFactory {
-    WriterFactoryV3(Configuration conf, CacheConfig cacheConf) {
-      super(conf, cacheConf);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream,
-        final KVComparator comparator, HFileContext fileContext)
-        throws IOException {
-      return new HFileWriterV3(conf, cacheConf, fs, path, ostream, comparator, fileContext);
-    }
-  }
-
-  /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV3(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path,
-      FSDataOutputStream ostream, final KVComparator comparator,
-      final HFileContext fileContext) throws IOException {
-    super(conf, cacheConf, fs, path, ostream, comparator, fileContext);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Writer" + (path != null ? " for " + path : "") +
-        " initialized with cacheConf: " + cacheConf +
-        " comparator: " + comparator.getClass().getSimpleName() +
-        " fileContext: " + fileContext);
-    }
-  }
-
-  /**
-   * Add key/value to file. Keys must be added in an order that agrees with the
-   * Comparator passed on construction.
-   * 
-   * @param cell
-   *          Cell to add. Cannot be empty nor null.
-   * @throws IOException
-   */
-  @Override
-  public void append(final Cell cell) throws IOException {
-    // Currently get the complete arrays
-    super.append(cell);
-    int tagsLength = cell.getTagsLength();
-    if (tagsLength > this.maxTagsLength) {
-      this.maxTagsLength = tagsLength;
-    }
-  }
-
-  protected void finishFileInfo() throws IOException {
-    super.finishFileInfo();
-    if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) {
-      // In case of Prefix Tree encoding, we always write tags information into HFiles even if all
-      // KVs are having no tags.
-      fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
-    } else if (hFileContext.isIncludesTags()) {
-      // When tags are not being written in this file, MAX_TAGS_LEN is excluded
-      // from the FileInfo
-      fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
-      boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE)
-        && hFileContext.isCompressTags();
-      fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false);
-    }
-  }
-
-  @Override
-  protected int getMajorVersion() {
-    return 3;
-  }
-
-  @Override
-  protected int getMinorVersion() {
-    return HFileReaderV3.MAX_MINOR_VERSION;
-  }
-
-  @Override
-  protected void finishClose(FixedFileTrailer trailer) throws IOException {
-    // Write out encryption metadata before finalizing if we have a valid crypto context
-    Encryption.Context cryptoContext = hFileContext.getEncryptionContext();
-    if (cryptoContext != Encryption.Context.NONE) {
-      // Wrap the context's key and write it as the encryption metadata, the wrapper includes
-      // all information needed for decryption
-      trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(),
-        cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,
-          User.getCurrent().getShortName()),
-        cryptoContext.getKey()));
-    }
-    // Now we can finish the close
-    super.finishClose(trailer);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 465be56..17465ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
+import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -129,7 +129,7 @@ public class HFileOutputFormat2
     // Invented config.  Add to hbase-*.xml if other than default compression.
     final String defaultCompressionStr = conf.get("hfile.compression",
         Compression.Algorithm.NONE.getName());
-    final Algorithm defaultCompression = AbstractHFileWriter
+    final Algorithm defaultCompression = HFileWriterImpl
         .compressionByName(defaultCompressionStr);
     final boolean compactionExclude = conf.getBoolean(
         "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
@@ -245,7 +245,7 @@ public class HFileOutputFormat2
                                     .withBlockSize(blockSize);
         contextBuilder.withDataBlockEncoding(encoding);
         HFileContext hFileContext = contextBuilder.build();
-                                    
+
         wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
             .withOutputDir(familydir).withBloomType(bloomType)
             .withComparator(KeyValue.COMPARATOR)
@@ -358,7 +358,7 @@ public class HFileOutputFormat2
    * </ul>
    * The user should be sure to set the map output value class to either KeyValue or Put before
    * running this function.
-   * 
+   *
    * @deprecated Use {@link #configureIncrementalLoad(Job, Table, RegionLocator)} instead.
    */
   @Deprecated
@@ -448,7 +448,7 @@ public class HFileOutputFormat2
     TableMapReduceUtil.initCredentials(job);
     LOG.info("Incremental table " + regionLocator.getName() + " output configured.");
   }
-  
+
   public static void configureIncrementalLoadMap(Job job, Table table) throws IOException {
     Configuration conf = job.getConfiguration();
 
@@ -483,8 +483,7 @@ public class HFileOutputFormat2
     Map<byte[], Algorithm> compressionMap = new TreeMap<byte[],
         Algorithm>(Bytes.BYTES_COMPARATOR);
     for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
-      Algorithm algorithm = AbstractHFileWriter.compressionByName
-          (e.getValue());
+      Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue());
       compressionMap.put(e.getKey(), algorithm);
     }
     return compressionMap;

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index af457ec..de4b8c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -95,6 +95,7 @@ import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -490,6 +491,7 @@ public class HRegionServer extends HasThread implements
       throws IOException, InterruptedException {
     this.fsOk = true;
     this.conf = conf;
+    HFile.checkHFileVersion(this.conf);
     checkCodecs(this.conf);
     this.userProvider = UserProvider.instantiate(conf);
     FSUtils.setupShortCircuitRead(this.conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 8910042..0bb2391 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -136,7 +136,7 @@ public interface Region extends ConfigurationObserver {
    */
   long getOldestHfileTs(boolean majorCompactioOnly) throws IOException;
 
-  /** 
+  /**
    * @return map of column family names to max sequence id that was read from storage when this
    * region was opened
    */
@@ -157,7 +157,7 @@ public interface Region extends ConfigurationObserver {
 
   ///////////////////////////////////////////////////////////////////////////
   // Metrics
-  
+
   /** @return read requests count for this region */
   long getReadRequestsCount();
 
@@ -181,7 +181,7 @@ public interface Region extends ConfigurationObserver {
 
   /** @return the number of mutations processed bypassing the WAL */
   long getNumMutationsWithoutWAL();
-  
+
   /** @return the size of data processed bypassing the WAL, in bytes */
   long getDataInMemoryWithoutWAL();
 
@@ -216,7 +216,7 @@ public interface Region extends ConfigurationObserver {
 
   /**
    * This method needs to be called before any public call that reads or
-   * modifies data. 
+   * modifies data.
    * Acquires a read lock and checks if the region is closing or closed.
    * <p>{@link #closeRegionOperation} MUST then always be called after
    * the operation has completed, whether it succeeded or failed.
@@ -226,7 +226,7 @@ public interface Region extends ConfigurationObserver {
 
   /**
    * This method needs to be called before any public call that reads or
-   * modifies data. 
+   * modifies data.
    * Acquires a read lock and checks if the region is closing or closed.
    * <p>{@link #closeRegionOperation} MUST then always be called after
    * the operation has completed, whether it succeeded or failed.
@@ -413,7 +413,7 @@ public interface Region extends ConfigurationObserver {
 
   /**
    * Perform atomic mutations within the region.
-   * 
+   *
    * @param mutations The list of mutations to perform.
    * <code>mutations</code> can contain operations for multiple rows.
    * Caller has to ensure that all rows are contained in this region.
@@ -588,7 +588,7 @@ public interface Region extends ConfigurationObserver {
       byte[] now) throws IOException;
 
   /**
-   * Replace any cell timestamps set to HConstants#LATEST_TIMESTAMP with the
+   * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP}
    * provided current timestamp.
    * @param values
    * @param now
@@ -609,13 +609,13 @@ public interface Region extends ConfigurationObserver {
       CANNOT_FLUSH_MEMSTORE_EMPTY,
       CANNOT_FLUSH
     }
-    
+
     /** @return the detailed result code */
     Result getResult();
 
     /** @return true if the memstores were flushed, else false */
     boolean isFlushSucceeded();
-    
+
     /** @return True if the flush requested a compaction, else false */
     boolean isCompactionNeeded();
   }
@@ -647,7 +647,7 @@ public interface Region extends ConfigurationObserver {
    * Synchronously compact all stores in the region.
    * <p>This operation could block for a long time, so don't call it from a
    * time-sensitive thread.
-   * <p>Note that no locks are taken to prevent possible conflicts between 
+   * <p>Note that no locks are taken to prevent possible conflicts between
    * compaction and splitting activities. The regionserver does not normally compact
    * and split in parallel. However by calling this method you may introduce
    * unexpected and unhandled concurrency. Don't do this unless you know what

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index c1a6b76..345dd9b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
@@ -409,7 +408,7 @@ public class StoreFile {
     }
     this.reader.setSequenceID(this.sequenceid);
 
-    b = metadataMap.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
+    b = metadataMap.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
     if (b != null) {
       this.maxMemstoreTS = Bytes.toLong(b);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 3c3ea6b..ae820b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
@@ -142,7 +142,7 @@ public abstract class Compactor {
         fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
       }
       else {
-        tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
+        tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
         if (tmp != null) {
           fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index ba3cbe3..0058eaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.io.compress.Compression;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
+import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -120,7 +120,7 @@ public class CompressionTest {
   throws Exception {
     Configuration conf = HBaseConfiguration.create();
     HFileContext context = new HFileContextBuilder()
-                           .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
+                           .withCompression(HFileWriterImpl.compressionByName(codec)).build();
     HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
         .withPath(fs, path)
         .withFileContext(context)

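The static compressionByName helper now lives on HFileWriterImpl rather than AbstractHFileWriter, so callers such as CompressionTest only need to change the import. A minimal sketch of resolving a codec name into an HFileContext; the class name and hard-coded codec are illustrative.

    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;

    public class CompressionByNameSketch {
      public static void main(String[] args) {
        String codec = "gz";  // or "none", "snappy", ... as accepted by compressionByName
        HFileContext context = new HFileContextBuilder()
            .withCompression(HFileWriterImpl.compressionByName(codec))
            .build();
        System.out.println(context);
      }
    }
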
http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index ea10f60..cb12bea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.crypto.aes.AES;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
+import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -326,7 +326,7 @@ public class HFilePerformanceEvaluation {
     void setUp() throws Exception {
 
       HFileContextBuilder builder = new HFileContextBuilder()
-          .withCompression(AbstractHFileWriter.compressionByName(codec))
+          .withCompression(HFileWriterImpl.compressionByName(codec))
           .withBlockSize(RFILE_BLOCKSIZE);
       
       if (cipher == "aes") {

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index a025e6c..523a72a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -241,7 +241,6 @@ public class TestCacheOnWrite {
   public void setUp() throws IOException {
     conf = TEST_UTIL.getConfiguration();
     this.conf.set("dfs.datanode.data.dir.perm", "700");
-    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
     conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
         BLOOM_BLOCK_SIZE);
@@ -271,12 +270,7 @@ public class TestCacheOnWrite {
   }
 
   private void readStoreFile(boolean useTags) throws IOException {
-    AbstractHFileReader reader;
-    if (useTags) {
-        reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf, conf);
-    } else {
-        reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf, conf);
-    }
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf);
     LOG.info("HFile information: " + reader);
     HFileContext meta = new HFileContextBuilder().withCompression(compress)
       .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
@@ -377,11 +371,6 @@ public class TestCacheOnWrite {
   }
 
   private void writeStoreFile(boolean useTags) throws IOException {
-    if(useTags) {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
-    } else {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
-    }
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
     HFileContext meta = new HFileContextBuilder().withCompression(compress)
@@ -421,11 +410,6 @@ public class TestCacheOnWrite {
 
   private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
       throws IOException, InterruptedException {
-    if (useTags) {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
-    } else {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
-    }
     // TODO: need to change this test if we add a cache size threshold for
     // compactions, or if we implement some other kind of intelligent logic for
     // deciding what blocks to cache-on-write on compaction.

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
index 29adb9c..4b91013 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
@@ -89,7 +89,7 @@ public class TestFixedFileTrailer {
   @Test
   public void testTrailer() throws IOException {
     FixedFileTrailer t = new FixedFileTrailer(version, 
-        HFileReaderV2.PBUF_TRAILER_MINOR_VERSION);
+        HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION);
     t.setDataIndexCount(3);
     t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
 
@@ -122,7 +122,7 @@ public class TestFixedFileTrailer {
     {
       DataInputStream dis = new DataInputStream(bais);
       FixedFileTrailer t2 = new FixedFileTrailer(version, 
-          HFileReaderV2.PBUF_TRAILER_MINOR_VERSION);
+          HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION);
       t2.deserialize(dis);
       assertEquals(-1, bais.read()); // Ensure we have read everything.
       checkLoadedTrailer(version, t, t2);
@@ -171,7 +171,7 @@ public class TestFixedFileTrailer {
   public void testTrailerForV2NonPBCompatibility() throws Exception {
     if (version == 2) {
       FixedFileTrailer t = new FixedFileTrailer(version,
-          HFileReaderV2.MINOR_VERSION_NO_CHECKSUM);
+          HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM);
       t.setDataIndexCount(3);
       t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
       t.setLastDataBlockOffset(291);
@@ -198,7 +198,7 @@ public class TestFixedFileTrailer {
       {
         DataInputStream dis = new DataInputStream(bais);
         FixedFileTrailer t2 = new FixedFileTrailer(version,
-            HFileReaderV2.MINOR_VERSION_NO_CHECKSUM);
+            HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM);
         t2.deserialize(dis);
         assertEquals(-1, bais.read()); // Ensure we have read everything.
         checkLoadedTrailer(version, t, t2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index 64f46c8..b91cdd2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -81,8 +81,6 @@ public class TestForceCacheImportantBlocks {
   public static Collection<Object[]> parameters() {
     // HFile versions
     return Arrays.asList(
-      new Object[] { 2, true },
-      new Object[] { 2, false },
       new Object[] { 3, true },
       new Object[] { 3, false }
     );

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index d198274..f325451 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -239,7 +239,7 @@ public class TestHFile extends HBaseTestCase {
     FSDataOutputStream fout = createFSOutput(ncTFile);
     HFileContext meta = new HFileContextBuilder()
                         .withBlockSize(minBlockSize)
-                        .withCompression(AbstractHFileWriter.compressionByName(codec))
+                        .withCompression(HFileWriterImpl.compressionByName(codec))
                         .build();
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
@@ -330,7 +330,7 @@ public class TestHFile extends HBaseTestCase {
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     HFileContext meta = new HFileContextBuilder()
-                        .withCompression(AbstractHFileWriter.compressionByName(compress))
+                        .withCompression(HFileWriterImpl.compressionByName(compress))
                         .withBlockSize(minBlockSize).build();
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 7c9c45a..dc8c2fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -589,7 +589,7 @@ public class TestHFileBlockIndex {
       }
 
       // Manually compute the mid-key and validate it.
-      HFileReaderV2 reader2 = (HFileReaderV2) reader;
+      HFile.Reader reader2 = reader;
       HFileBlock.FSReader fsReader = reader2.getUncachedBlockReader();
 
       HFileBlock.BlockIterator iter = fsReader.blockRange(0,

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
index d07a6de..5b3e712 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
@@ -54,8 +54,7 @@ public class TestHFileInlineToRootChunkConversion {
     CacheConfig cacheConf = new CacheConfig(conf);
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
     HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
-    HFileWriterV2 hfw =
-        (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
+    HFile.Writer hfw = new HFileWriterFactory(conf, cacheConf)
             .withFileContext(context)
             .withPath(fs, hfPath).create();
     List<byte[]> keys = new ArrayList<byte[]>();
@@ -77,7 +76,7 @@ public class TestHFileInlineToRootChunkConversion {
     }
     hfw.close();
 
-    HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf, conf);
+    HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, conf);
     // Scanner doesn't do Cells yet.  Fix.
     HFileScanner scanner = reader.getScanner(true, true);
     for (int i = 0; i < keys.size(); ++i) {
@@ -85,4 +84,4 @@ public class TestHFileInlineToRootChunkConversion {
     }
     reader.close();
   }
-}
+}
\ No newline at end of file

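The test above no longer casts to HFileWriterV2/HFileReaderV2; it programs against HFile.Writer and HFile.Reader. A hedged end-to-end sketch of the same pattern using the public factory calls that appear in this patch; the scratch path, class name, and single cell are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteThenReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/write-then-read-example.hfile");  // scratch location, illustrative
        CacheConfig cacheConf = new CacheConfig(conf);
        HFileContext context = new HFileContextBuilder().withBlockSize(16 * 1024).build();

        // Write one cell through the version-agnostic factory; no cast to a concrete writer.
        HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fs, path)
            .withFileContext(context)
            .create();
        writer.append(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("v")));
        writer.close();

        // Read it back; createReader picks the on-disk format behind the HFile.Reader interface.
        HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
        reader.loadFileInfo();
        System.out.println("entries: " + reader.getEntries());
        reader.close();
      }
    }
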
http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index e502eb6..accf7ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -129,7 +129,7 @@ public class TestHFileSeek extends TestCase {
     try {
       HFileContext context = new HFileContextBuilder()
                             .withBlockSize(options.minBlockSize)
-                            .withCompression(AbstractHFileWriter.compressionByName(options.compress))
+                            .withCompression(HFileWriterImpl.compressionByName(options.compress))
                             .build();
       Writer writer = HFile.getWriterFactoryNoCache(conf)
           .withOutputStream(fout)

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
index bdf2ecc..9155fd6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java
@@ -55,7 +55,7 @@ import org.junit.experimental.categories.Category;
 
 /**
  * Testing writing a version 2 {@link HFile}. This is a low-level test written
- * during the development of {@link HFileWriterV2}.
+ * during the development of {@link HFileWriterImpl}.
  */
 @Category(SmallTests.class)
 public class TestHFileWriterV2 {
@@ -98,8 +98,7 @@ public class TestHFileWriterV2 {
                            .withBlockSize(4096)
                            .withCompression(compressAlgo)
                            .build();
-    HFileWriterV2 writer = (HFileWriterV2)
-        new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
+    HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf))
             .withPath(fs, hfilePath)
             .withFileContext(context)
             .create();
@@ -135,7 +134,6 @@ public class TestHFileWriterV2 {
     FixedFileTrailer trailer =
         FixedFileTrailer.readFromStream(fsdis, fileSize);
 
-    assertEquals(2, trailer.getMajorVersion());
     assertEquals(entryCount, trailer.getEntryCount());
 
     HFileContext meta = new HFileContextBuilder()
@@ -176,8 +174,7 @@ public class TestHFileWriterV2 {
     // File info
     FileInfo fileInfo = new FileInfo();
     fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
-    byte [] keyValueFormatVersion = fileInfo.get(
-        HFileWriterV2.KEY_VALUE_VERSION);
+    byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION);
     boolean includeMemstoreTS = keyValueFormatVersion != null &&
         Bytes.toInt(keyValueFormatVersion) > 0;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
index 76da924..627829f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
@@ -59,8 +59,7 @@ import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
 /**
- * Testing writing a version 3 {@link HFile}. This is a low-level test written
- * during the development of {@link HFileWriterV3}.
+ * Testing writing a version 3 {@link HFile}.
  */
 @RunWith(Parameterized.class)
 @Category(SmallTests.class)
@@ -119,8 +118,7 @@ public class TestHFileWriterV3 {
                            .withBlockSize(4096)
                            .withIncludesTags(useTags)
                            .withCompression(compressAlgo).build();
-    HFileWriterV3 writer = (HFileWriterV3)
-        new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
+    HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf))
             .withPath(fs, hfilePath)
             .withFileContext(context)
             .withComparator(KeyValue.COMPARATOR)
@@ -205,8 +203,7 @@ public class TestHFileWriterV3 {
     // File info
     FileInfo fileInfo = new FileInfo();
     fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
-    byte [] keyValueFormatVersion = fileInfo.get(
-        HFileWriterV3.KEY_VALUE_VERSION);
+    byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION);
     boolean includeMemstoreTS = keyValueFormatVersion != null &&
         Bytes.toInt(keyValueFormatVersion) > 0;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
index b7f7fa1..731bb33 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java
@@ -88,8 +88,7 @@ public class TestLazyDataBlockDecompression {
    */
   private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path,
       HFileContext cxt, int entryCount) throws IOException {
-    HFileWriterV2 writer = (HFileWriterV2)
-      new HFileWriterV2.WriterFactoryV2(conf, cc)
+    HFile.Writer writer = new HFileWriterFactory(conf, cc)
         .withPath(fs, path)
         .withFileContext(cxt)
         .create();
@@ -117,7 +116,7 @@ public class TestLazyDataBlockDecompression {
     long fileSize = fs.getFileStatus(path).getLen();
     FixedFileTrailer trailer =
       FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
-    HFileReaderV2 reader = new HFileReaderV2(path, trailer, fsdis, fileSize, cacheConfig,
+    HFile.Reader reader = new HFileReaderImpl(path, trailer, fsdis, fileSize, cacheConfig,
       fsdis.getHfs(), conf);
     reader.loadFileInfo();
     long offset = trailer.getFirstDataBlockOffset(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
index a70e5ec..0e05180 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
@@ -54,7 +54,6 @@ public class TestPrefetch {
   @Before
   public void setUp() throws IOException {
     conf = TEST_UTIL.getConfiguration();
-    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
     conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
     fs = HFileSystem.get(conf);
     CacheConfig.blockCacheDisabled = false;
@@ -69,10 +68,9 @@ public class TestPrefetch {
 
   private void readStoreFile(Path storeFilePath) throws Exception {
     // Open the file
-    HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs,
-      storeFilePath, cacheConf, conf);
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf);
 
-    while (!((HFileReaderV3)reader).prefetchComplete()) {
+    while (!reader.prefetchComplete()) {
       // Sleep for a bit
       Thread.sleep(1000);
     }

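TestPrefetch now waits on prefetchComplete() through the HFile.Reader interface instead of casting to HFileReaderV3. A small sketch of the same wait loop against an existing file; the class name and path argument are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class PrefetchWaitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
        FileSystem fs = FileSystem.get(conf);
        Path storeFilePath = new Path(args[0]);  // an existing HFile
        HFile.Reader reader = HFile.createReader(fs, storeFilePath, new CacheConfig(conf), conf);
        // Prefetch runs in the background; poll until all blocks have been cached.
        while (!reader.prefetchComplete()) {
          Thread.sleep(1000);
        }
        reader.close();
      }
    }
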
http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
index 54801e4..0947ecb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
@@ -36,7 +36,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Test {@link HFileScanner#reseekTo(byte[])}
+ * Test {@link HFileScanner#reseekTo(org.apache.hadoop.hbase.Cell)}
  */
 @Category(SmallTests.class)
 public class TestReseekTo {

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
index a642e8d..e810529 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
@@ -70,12 +70,7 @@ public class TestSeekTo extends HBaseTestCase {
   }
 
   Path makeNewFile(TagUsage tagUsage) throws IOException {
-    Path ncTFile = new Path(this.testDir, "basic.hfile");
-    if (tagUsage != TagUsage.NO_TAG) {
-      conf.setInt("hfile.format.version", 3);
-    } else {
-      conf.setInt("hfile.format.version", 2);
-    }
+    Path ncTFile = new Path(testDir, "basic.hfile");
     FSDataOutputStream fout = this.fs.create(ncTFile);
     int blocksize = toKV("a", tagUsage).getLength() * 3;
     HFileContext context = new HFileContextBuilder().withBlockSize(blocksize)
@@ -138,7 +133,7 @@ public class TestSeekTo extends HBaseTestCase {
   }
 
   public void testSeekBeforeWithReSeekTo() throws Exception {
     testSeekBeforeWithReSeekToInternals(TagUsage.NO_TAG);
     testSeekBeforeWithReSeekToInternals(TagUsage.ONLY_TAG);
     testSeekBeforeWithReSeekToInternals(TagUsage.PARTIAL_TAG);
   }
@@ -227,7 +222,7 @@ public class TestSeekTo extends HBaseTestCase {
   }
 
   public void testSeekTo() throws Exception {
     testSeekToInternals(TagUsage.NO_TAG);
     testSeekToInternals(TagUsage.ONLY_TAG);
     testSeekToInternals(TagUsage.PARTIAL_TAG);
   }
@@ -255,7 +250,7 @@ public class TestSeekTo extends HBaseTestCase {
     reader.close();
   }
   public void testBlockContainingKey() throws Exception {
     testBlockContainingKeyInternals(TagUsage.NO_TAG);
     testBlockContainingKeyInternals(TagUsage.ONLY_TAG);
     testBlockContainingKeyInternals(TagUsage.PARTIAL_TAG);
   }
@@ -264,7 +259,7 @@ public class TestSeekTo extends HBaseTestCase {
     Path p = makeNewFile(tagUsage);
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), conf);
     reader.loadFileInfo();
-    HFileBlockIndex.BlockIndexReader blockIndexReader = 
+    HFileBlockIndex.BlockIndexReader blockIndexReader =
       reader.getDataBlockIndexReader();
     System.out.println(blockIndexReader.toString());
     // falls before the start of the file.

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index 1927334..6544c72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
+import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.Compressor;
@@ -602,8 +602,9 @@ public class DataBlockEncodingTool {
     // run the utilities
     DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
     int majorVersion = reader.getHFileVersion();
-    comp.useHBaseChecksum = majorVersion > 2
-        || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
+    comp.useHBaseChecksum = majorVersion > 2 ||
+      (majorVersion == 2 &&
+       reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM);
     comp.checkStatistics(scanner, kvLimit);
     if (doVerify) {
       comp.verifyCodecs(scanner, kvLimit);

http://git-wip-us.apache.org/repos/asf/hbase/blob/691efc60/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 9a227ab..576e323 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
@@ -220,7 +219,7 @@ public class TestCacheOnWriteInSchema {
     BlockCache cache = cacheConf.getBlockCache();
     StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
       BloomType.ROWCOL);
-    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
+    HFile.Reader reader = sf.createReader().getHFileReader();
     try {
       // Open a scanner with (on read) caching disabled
       HFileScanner scanner = reader.getScanner(false, false);

