From: stack@apache.org
To: commits@hbase.apache.org
Date: Fri, 03 Apr 2015 22:25:42 -0000
Message-Id: <3e53cda417b640c6bbab08fffad0e425@git.apache.org>
Subject: [1/4] hbase git commit: HBASE-13373 Squash HFileReaderV3 together with HFileReaderV2 and AbstractHFileReader; ditto for Scanners and BlockReader, etc. Reapply after adding in the missing JIRA number

Repository: hbase
Updated Branches: refs/heads/master 319666ca5 -> 3a2a29616

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
new file mode 100644
index 0000000..0555363
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -0,0 +1,641 @@
+/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.io.hfile; + +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; +import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; +import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.BloomFilterWriter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.io.Writable; + +/** + * Common functionality needed by all versions of {@link HFile} writers. + */ +@InterfaceAudience.Private +public class HFileWriterImpl implements HFile.Writer { + private static final Log LOG = LogFactory.getLog(HFileWriterImpl.class); + + /** The Cell previously appended. Becomes the last cell in the file.*/ + protected Cell lastCell = null; + + /** FileSystem stream to write into. */ + protected FSDataOutputStream outputStream; + + /** True if we opened the outputStream (and so will close it). */ + protected final boolean closeOutputStream; + + /** A "file info" block: a key-value map of file-wide metadata. */ + protected FileInfo fileInfo = new HFile.FileInfo(); + + /** Total # of key/value entries, i.e. how many times add() was called. */ + protected long entryCount = 0; + + /** Used for calculating the average key length. */ + protected long totalKeyLength = 0; + + /** Used for calculating the average value length. */ + protected long totalValueLength = 0; + + /** Total uncompressed bytes, maybe calculate a compression ratio later. */ + protected long totalUncompressedBytes = 0; + + /** Key comparator. Used to ensure we write in order. */ + protected final KVComparator comparator; + + /** Meta block names. */ + protected List metaNames = new ArrayList(); + + /** {@link Writable}s representing meta block data. */ + protected List metaData = new ArrayList(); + + /** + * First cell in a block. + * This reference should be short-lived since we write hfiles in a burst. + */ + protected Cell firstCellInBlock = null; + + + /** May be null if we were passed a stream. */ + protected final Path path; + + /** Cache configuration for caching data on write. */ + protected final CacheConfig cacheConf; + + /** + * Name for this object used when logging or in toString. Is either + * the result of a toString on stream or else name of passed file Path. + */ + protected final String name; + + /** + * The data block encoding which will be used. + * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. 
+ */ + protected final HFileDataBlockEncoder blockEncoder; + + protected final HFileContext hFileContext; + + private int maxTagsLength = 0; + + /** KeyValue version in FileInfo */ + public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); + + /** Version for KeyValue which includes memstore timestamp */ + public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; + + /** Inline block writers for multi-level block index and compound Blooms. */ + private List inlineBlockWriters = new ArrayList(); + + /** block writer */ + protected HFileBlock.Writer fsBlockWriter; + + private HFileBlockIndex.BlockIndexWriter dataBlockIndexWriter; + private HFileBlockIndex.BlockIndexWriter metaBlockIndexWriter; + + /** The offset of the first data block or -1 if the file is empty. */ + private long firstDataBlockOffset = -1; + + /** The offset of the last data block or 0 if the file is empty. */ + protected long lastDataBlockOffset; + + /** + * The last(stop) Cell of the previous data block. + * This reference should be short-lived since we write hfiles in a burst. + */ + private Cell lastCellOfPreviousBlock = null; + + /** Additional data items to be written to the "load-on-open" section. */ + private List additionalLoadOnOpenData = new ArrayList(); + + protected long maxMemstoreTS = 0; + + public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, + FSDataOutputStream outputStream, + KVComparator comparator, HFileContext fileContext) { + this.outputStream = outputStream; + this.path = path; + this.name = path != null ? path.getName() : outputStream.toString(); + this.hFileContext = fileContext; + DataBlockEncoding encoding = hFileContext.getDataBlockEncoding(); + if (encoding != DataBlockEncoding.NONE) { + this.blockEncoder = new HFileDataBlockEncoderImpl(encoding); + } else { + this.blockEncoder = NoOpDataBlockEncoder.INSTANCE; + } + this.comparator = comparator != null ? comparator + : KeyValue.COMPARATOR; + + closeOutputStream = path != null; + this.cacheConf = cacheConf; + finishInit(conf); + if (LOG.isTraceEnabled()) { + LOG.trace("Writer" + (path != null ? " for " + path : "") + + " initialized with cacheConf: " + cacheConf + + " comparator: " + comparator.getClass().getSimpleName() + + " fileContext: " + fileContext); + } + } + + /** + * Add to the file info. All added key/value pairs can be obtained using + * {@link HFile.Reader#loadFileInfo()}. + * + * @param k Key + * @param v Value + * @throws IOException in case the key or the value are invalid + */ + @Override + public void appendFileInfo(final byte[] k, final byte[] v) + throws IOException { + fileInfo.append(k, v, true); + } + + /** + * Sets the file info offset in the trailer, finishes up populating fields in + * the file info, and writes the file info into the given data output. The + * reason the data output is not always {@link #outputStream} is that we store + * file info as a block in version 2. + * + * @param trailer fixed file trailer + * @param out the data output to write the file info to + * @throws IOException + */ + protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) + throws IOException { + trailer.setFileInfoOffset(outputStream.getPos()); + finishFileInfo(); + fileInfo.write(out); + } + + /** + * Checks that the given Cell's key does not violate the key order. + * + * @param cell Cell whose key to check. 
+ * @return true if the key is duplicate + * @throws IOException if the key or the key order is wrong + */ + protected boolean checkKey(final Cell cell) throws IOException { + boolean isDuplicateKey = false; + + if (cell == null) { + throw new IOException("Key cannot be null or empty"); + } + if (lastCell != null) { + int keyComp = comparator.compareOnlyKeyPortion(lastCell, cell); + + if (keyComp > 0) { + throw new IOException("Added a key not lexically larger than" + + " previous. Current cell = " + cell + ", lastCell = " + lastCell); + } else if (keyComp == 0) { + isDuplicateKey = true; + } + } + return isDuplicateKey; + } + + /** Checks the given value for validity. */ + protected void checkValue(final byte[] value, final int offset, + final int length) throws IOException { + if (value == null) { + throw new IOException("Value cannot be null"); + } + } + + /** + * @return Path or null if we were passed a stream rather than a Path. + */ + @Override + public Path getPath() { + return path; + } + + @Override + public String toString() { + return "writer=" + (path != null ? path.toString() : null) + ", name=" + + name + ", compression=" + hFileContext.getCompression().getName(); + } + + public static Compression.Algorithm compressionByName(String algoName) { + if (algoName == null) + return HFile.DEFAULT_COMPRESSION_ALGORITHM; + return Compression.getCompressionAlgorithmByName(algoName); + } + + /** A helper method to create HFile output streams in constructors */ + protected static FSDataOutputStream createOutputStream(Configuration conf, + FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perms = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + return FSUtils.create(fs, path, perms, favoredNodes); + } + + /** Additional initialization steps */ + protected void finishInit(final Configuration conf) { + if (fsBlockWriter != null) { + throw new IllegalStateException("finishInit called twice"); + } + + fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext); + + // Data block index writer + boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); + dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter, + cacheIndexesOnWrite ? cacheConf : null, + cacheIndexesOnWrite ? name : null); + dataBlockIndexWriter.setMaxChunkSize( + HFileBlockIndex.getMaxChunkSize(conf)); + inlineBlockWriters.add(dataBlockIndexWriter); + + // Meta data block index writer + metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); + if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf); + } + + /** + * At a block boundary, write all the inline blocks and opens new block. + * + * @throws IOException + */ + protected void checkBlockBoundary() throws IOException { + if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) return; + finishBlock(); + writeInlineBlocks(false); + newBlock(); + } + + /** Clean up the current data block */ + private void finishBlock() throws IOException { + if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) return; + + // Update the first data block offset for scanning. 
+ if (firstDataBlockOffset == -1) { + firstDataBlockOffset = outputStream.getPos(); + } + // Update the last data block offset + lastDataBlockOffset = outputStream.getPos(); + fsBlockWriter.writeHeaderAndData(outputStream); + int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader(); + Cell indexEntry = + CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); + dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), + lastDataBlockOffset, onDiskSize); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + if (cacheConf.shouldCacheDataOnWrite()) { + doCacheOnWrite(lastDataBlockOffset); + } + } + + /** Gives inline block writers an opportunity to contribute blocks. */ + private void writeInlineBlocks(boolean closing) throws IOException { + for (InlineBlockWriter ibw : inlineBlockWriters) { + while (ibw.shouldWriteBlock(closing)) { + long offset = outputStream.getPos(); + boolean cacheThisBlock = ibw.getCacheOnWrite(); + ibw.writeInlineBlock(fsBlockWriter.startWriting( + ibw.getInlineBlockType())); + fsBlockWriter.writeHeaderAndData(outputStream); + ibw.blockWritten(offset, fsBlockWriter.getOnDiskSizeWithHeader(), + fsBlockWriter.getUncompressedSizeWithoutHeader()); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + + if (cacheThisBlock) { + doCacheOnWrite(offset); + } + } + } + } + + /** + * Caches the last written HFile block. + * @param offset the offset of the block we want to cache. Used to determine + * the cache key. + */ + private void doCacheOnWrite(long offset) { + HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching(cacheConf); + cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(name, offset), cacheFormatBlock); + } + + /** + * Ready a new block for writing. + * + * @throws IOException + */ + protected void newBlock() throws IOException { + // This is where the next block begins. + fsBlockWriter.startWriting(BlockType.DATA); + firstCellInBlock = null; + if (lastCell != null) { + lastCellOfPreviousBlock = lastCell; + } + } + + /** + * Add a meta block to the end of the file. Call before close(). Metadata + * blocks are expensive. Fill one with a bunch of serialized data rather than + * do a metadata block per metadata instance. If metadata is small, consider + * adding to file info using {@link #appendFileInfo(byte[], byte[])} + * + * @param metaBlockName + * name of the block + * @param content + * will call readFields to get data later (DO NOT REUSE) + */ + @Override + public void appendMetaBlock(String metaBlockName, Writable content) { + byte[] key = Bytes.toBytes(metaBlockName); + int i; + for (i = 0; i < metaNames.size(); ++i) { + // stop when the current key is greater than our own + byte[] cur = metaNames.get(i); + if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, + key.length) > 0) { + break; + } + } + metaNames.add(i, key); + metaData.add(i, content); + } + + @Override + public void close() throws IOException { + if (outputStream == null) { + return; + } + // Save data block encoder metadata in the file info. + blockEncoder.saveMetadata(this); + // Write out the end of the data blocks, then write meta data blocks. + // followed by fileinfo, data block index and meta block index. + + finishBlock(); + writeInlineBlocks(true); + + FixedFileTrailer trailer = new FixedFileTrailer(getMajorVersion(), getMinorVersion()); + + // Write out the metadata blocks if any. 
+ if (!metaNames.isEmpty()) { + for (int i = 0; i < metaNames.size(); ++i) { + // store the beginning offset + long offset = outputStream.getPos(); + // write the metadata content + DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META); + metaData.get(i).write(dos); + + fsBlockWriter.writeHeaderAndData(outputStream); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + + // Add the new meta block to the meta index. + metaBlockIndexWriter.addEntry(metaNames.get(i), offset, + fsBlockWriter.getOnDiskSizeWithHeader()); + } + } + + // Load-on-open section. + + // Data block index. + // + // In version 2, this section of the file starts with the root level data + // block index. We call a function that writes intermediate-level blocks + // first, then root level, and returns the offset of the root level block + // index. + + long rootIndexOffset = dataBlockIndexWriter.writeIndexBlocks(outputStream); + trailer.setLoadOnOpenOffset(rootIndexOffset); + + // Meta block index. + metaBlockIndexWriter.writeSingleLevelIndex(fsBlockWriter.startWriting( + BlockType.ROOT_INDEX), "meta"); + fsBlockWriter.writeHeaderAndData(outputStream); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + + if (this.hFileContext.isIncludesMvcc()) { + appendFileInfo(MAX_MEMSTORE_TS_KEY, Bytes.toBytes(maxMemstoreTS)); + appendFileInfo(KEY_VALUE_VERSION, Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE)); + } + + // File info + writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO)); + fsBlockWriter.writeHeaderAndData(outputStream); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + + // Load-on-open data supplied by higher levels, e.g. Bloom filters. + for (BlockWritable w : additionalLoadOnOpenData){ + fsBlockWriter.writeBlock(w, outputStream); + totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); + } + + // Now finish off the trailer. 
+ trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels()); + trailer.setUncompressedDataIndexSize( + dataBlockIndexWriter.getTotalUncompressedSize()); + trailer.setFirstDataBlockOffset(firstDataBlockOffset); + trailer.setLastDataBlockOffset(lastDataBlockOffset); + trailer.setComparatorClass(comparator.getClass()); + trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries()); + + + finishClose(trailer); + + fsBlockWriter.release(); + } + + @Override + public void addInlineBlockWriter(InlineBlockWriter ibw) { + inlineBlockWriters.add(ibw); + } + + @Override + public void addGeneralBloomFilter(final BloomFilterWriter bfw) { + this.addBloomFilter(bfw, BlockType.GENERAL_BLOOM_META); + } + + @Override + public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) { + this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META); + } + + private void addBloomFilter(final BloomFilterWriter bfw, + final BlockType blockType) { + if (bfw.getKeyCount() <= 0) + return; + + if (blockType != BlockType.GENERAL_BLOOM_META && + blockType != BlockType.DELETE_FAMILY_BLOOM_META) { + throw new RuntimeException("Block Type: " + blockType.toString() + + " is not supported"); + } + additionalLoadOnOpenData.add(new BlockWritable() { + @Override + public BlockType getBlockType() { + return blockType; + } + + @Override + public void writeToBlock(DataOutput out) throws IOException { + bfw.getMetaWriter().write(out); + Writable dataWriter = bfw.getDataWriter(); + if (dataWriter != null) + dataWriter.write(out); + } + }); + } + + @Override + public HFileContext getFileContext() { + return hFileContext; + } + + /** + * Add key/value to file. Keys must be added in an order that agrees with the + * Comparator passed on construction. + * + * @param cell + * Cell to add. Cannot be empty nor null. + * @throws IOException + */ + @Override + public void append(final Cell cell) throws IOException { + byte[] value = cell.getValueArray(); + int voffset = cell.getValueOffset(); + int vlength = cell.getValueLength(); + // checkKey uses comparator to check we are writing in order. + boolean dupKey = checkKey(cell); + checkValue(value, voffset, vlength); + if (!dupKey) { + checkBlockBoundary(); + } + + if (!fsBlockWriter.isWriting()) { + newBlock(); + } + + fsBlockWriter.write(cell); + + totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell); + totalValueLength += vlength; + + // Are we the first key in this block? + if (firstCellInBlock == null) { + // If cell is big, block will be closed and this firstCellInBlock reference will only last + // a short while. + firstCellInBlock = cell; + } + + // TODO: What if cell is 10MB and we write infrequently? We hold on to the cell here indefinitely? + lastCell = cell; + entryCount++; + this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId()); + int tagsLength = cell.getTagsLength(); + if (tagsLength > this.maxTagsLength) { + this.maxTagsLength = tagsLength; + } + } + + protected void finishFileInfo() throws IOException { + if (lastCell != null) { + // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean + // byte buffer. Won't take a tuple. + byte [] lastKey = CellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); + fileInfo.append(FileInfo.LASTKEY, lastKey, false); + } + + // Average key length. + int avgKeyLen = + entryCount == 0 ?
0 : (int) (totalKeyLength / entryCount); + fileInfo.append(FileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); + fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), + false); + + // Average value length. + int avgValueLen = + entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); + fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); + if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) { + // In case of Prefix Tree encoding, we always write tags information into HFiles even if all + // KVs are having no tags. + fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); + } else if (hFileContext.isIncludesTags()) { + // When tags are not being written in this file, MAX_TAGS_LEN is excluded + // from the FileInfo + fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); + boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE) + && hFileContext.isCompressTags(); + fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false); + } + } + + protected int getMajorVersion() { + return 3; + } + + protected int getMinorVersion() { + return HFileReaderImpl.MAX_MINOR_VERSION; + } + + protected void finishClose(FixedFileTrailer trailer) throws IOException { + // Write out encryption metadata before finalizing if we have a valid crypto context + Encryption.Context cryptoContext = hFileContext.getEncryptionContext(); + if (cryptoContext != Encryption.Context.NONE) { + // Wrap the context's key and write it as the encryption metadata, the wrapper includes + // all information needed for decryption + trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), + cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, + User.getCurrent().getShortName()), + cryptoContext.getKey())); + } + // Now we can finish the close + trailer.setMetaIndexCount(metaNames.size()); + trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); + trailer.setEntryCount(entryCount); + trailer.setCompressionCodec(hFileContext.getCompression()); + + trailer.serialize(outputStream); + + if (closeOutputStream) { + outputStream.close(); + outputStream = null; + } + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java deleted file mode 100644 index 28c4655..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io.hfile; - -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.hfile.HFile.Writer; -import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; -import org.apache.hadoop.hbase.util.BloomFilterWriter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.Writable; - -/** - * Writes HFile format version 2. - */ -@InterfaceAudience.Private -public class HFileWriterV2 extends AbstractHFileWriter { - static final Log LOG = LogFactory.getLog(HFileWriterV2.class); - - /** Max memstore (mvcc) timestamp in FileInfo */ - public static final byte [] MAX_MEMSTORE_TS_KEY = - Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); - - /** KeyValue version in FileInfo */ - public static final byte [] KEY_VALUE_VERSION = - Bytes.toBytes("KEY_VALUE_VERSION"); - - /** Version for KeyValue which includes memstore timestamp */ - public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; - - /** Inline block writers for multi-level block index and compound Blooms. */ - private List inlineBlockWriters = - new ArrayList(); - - /** Unified version 2 block writer */ - protected HFileBlock.Writer fsBlockWriter; - - private HFileBlockIndex.BlockIndexWriter dataBlockIndexWriter; - private HFileBlockIndex.BlockIndexWriter metaBlockIndexWriter; - - /** The offset of the first data block or -1 if the file is empty. */ - private long firstDataBlockOffset = -1; - - /** The offset of the last data block or 0 if the file is empty. */ - protected long lastDataBlockOffset; - - /** - * The last(stop) Cell of the previous data block. - * This reference should be short-lived since we write hfiles in a burst. - */ - private Cell lastCellOfPreviousBlock = null; - - /** Additional data items to be written to the "load-on-open" section. */ - private List additionalLoadOnOpenData = - new ArrayList(); - - protected long maxMemstoreTS = 0; - - static class WriterFactoryV2 extends HFile.WriterFactory { - WriterFactoryV2(Configuration conf, CacheConfig cacheConf) { - super(conf, cacheConf); - } - - @Override - public Writer createWriter(FileSystem fs, Path path, - FSDataOutputStream ostream, - KVComparator comparator, HFileContext context) throws IOException { - context.setIncludesTags(false);// HFile V2 does not deal with tags at all! - return new HFileWriterV2(conf, cacheConf, fs, path, ostream, - comparator, context); - } - } - - /** Constructor that takes a path, creates and closes the output stream. 
*/ - public HFileWriterV2(Configuration conf, CacheConfig cacheConf, - FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, final HFileContext context) throws IOException { - super(cacheConf, - ostream == null ? createOutputStream(conf, fs, path, null) : ostream, - path, comparator, context); - finishInit(conf); - } - - /** Additional initialization steps */ - protected void finishInit(final Configuration conf) { - if (fsBlockWriter != null) - throw new IllegalStateException("finishInit called twice"); - - fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext); - - // Data block index writer - boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); - dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(fsBlockWriter, - cacheIndexesOnWrite ? cacheConf : null, - cacheIndexesOnWrite ? name : null); - dataBlockIndexWriter.setMaxChunkSize( - HFileBlockIndex.getMaxChunkSize(conf)); - inlineBlockWriters.add(dataBlockIndexWriter); - - // Meta data block index writer - metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); - if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf); - } - - /** - * At a block boundary, write all the inline blocks and opens new block. - * - * @throws IOException - */ - protected void checkBlockBoundary() throws IOException { - if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) - return; - - finishBlock(); - writeInlineBlocks(false); - newBlock(); - } - - /** Clean up the current data block */ - private void finishBlock() throws IOException { - if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) - return; - - // Update the first data block offset for scanning. - if (firstDataBlockOffset == -1) { - firstDataBlockOffset = outputStream.getPos(); - } - // Update the last data block offset - lastDataBlockOffset = outputStream.getPos(); - fsBlockWriter.writeHeaderAndData(outputStream); - int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader(); - - Cell indexEntry = - CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); - dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), - lastDataBlockOffset, onDiskSize); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - if (cacheConf.shouldCacheDataOnWrite()) { - doCacheOnWrite(lastDataBlockOffset); - } - } - - /** Gives inline block writers an opportunity to contribute blocks. */ - private void writeInlineBlocks(boolean closing) throws IOException { - for (InlineBlockWriter ibw : inlineBlockWriters) { - while (ibw.shouldWriteBlock(closing)) { - long offset = outputStream.getPos(); - boolean cacheThisBlock = ibw.getCacheOnWrite(); - ibw.writeInlineBlock(fsBlockWriter.startWriting( - ibw.getInlineBlockType())); - fsBlockWriter.writeHeaderAndData(outputStream); - ibw.blockWritten(offset, fsBlockWriter.getOnDiskSizeWithHeader(), - fsBlockWriter.getUncompressedSizeWithoutHeader()); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - - if (cacheThisBlock) { - doCacheOnWrite(offset); - } - } - } - } - - /** - * Caches the last written HFile block. - * @param offset the offset of the block we want to cache. Used to determine - * the cache key. 
- */ - private void doCacheOnWrite(long offset) { - HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching(cacheConf); - cacheConf.getBlockCache().cacheBlock( - new BlockCacheKey(name, offset), cacheFormatBlock); - } - - /** - * Ready a new block for writing. - * - * @throws IOException - */ - protected void newBlock() throws IOException { - // This is where the next block begins. - fsBlockWriter.startWriting(BlockType.DATA); - firstCellInBlock = null; - if (lastCell != null) { - lastCellOfPreviousBlock = lastCell; - } - } - - /** - * Add a meta block to the end of the file. Call before close(). Metadata - * blocks are expensive. Fill one with a bunch of serialized data rather than - * do a metadata block per metadata instance. If metadata is small, consider - * adding to file info using {@link #appendFileInfo(byte[], byte[])} - * - * @param metaBlockName - * name of the block - * @param content - * will call readFields to get data later (DO NOT REUSE) - */ - @Override - public void appendMetaBlock(String metaBlockName, Writable content) { - byte[] key = Bytes.toBytes(metaBlockName); - int i; - for (i = 0; i < metaNames.size(); ++i) { - // stop when the current key is greater than our own - byte[] cur = metaNames.get(i); - if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, - key.length) > 0) { - break; - } - } - metaNames.add(i, key); - metaData.add(i, content); - } - - /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell Cell to add. Cannot be empty nor null. - * @throws IOException - */ - @Override - public void append(final Cell cell) throws IOException { - byte[] value = cell.getValueArray(); - int voffset = cell.getValueOffset(); - int vlength = cell.getValueLength(); - // checkKey uses comparator to check we are writing in order. - boolean dupKey = checkKey(cell); - checkValue(value, voffset, vlength); - if (!dupKey) { - checkBlockBoundary(); - } - - if (!fsBlockWriter.isWriting()) { - newBlock(); - } - - fsBlockWriter.write(cell); - - totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell); - totalValueLength += vlength; - - // Are we the first key in this block? - if (firstCellInBlock == null) { - // If cell is big, block will be closed and this firstCellInBlock reference will only last - // a short while. - firstCellInBlock = cell; - } - - // TODO: What if cell is 10MB and we write infrequently? We'll hold on to the cell here - // indefinetly? - lastCell = cell; - entryCount++; - this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId()); - } - - @Override - public void close() throws IOException { - if (outputStream == null) { - return; - } - // Save data block encoder metadata in the file info. - blockEncoder.saveMetadata(this); - // Write out the end of the data blocks, then write meta data blocks. - // followed by fileinfo, data block index and meta block index. - - finishBlock(); - writeInlineBlocks(true); - - FixedFileTrailer trailer = new FixedFileTrailer(getMajorVersion(), getMinorVersion()); - - // Write out the metadata blocks if any. 
- if (!metaNames.isEmpty()) { - for (int i = 0; i < metaNames.size(); ++i) { - // store the beginning offset - long offset = outputStream.getPos(); - // write the metadata content - DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META); - metaData.get(i).write(dos); - - fsBlockWriter.writeHeaderAndData(outputStream); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - - // Add the new meta block to the meta index. - metaBlockIndexWriter.addEntry(metaNames.get(i), offset, - fsBlockWriter.getOnDiskSizeWithHeader()); - } - } - - // Load-on-open section. - - // Data block index. - // - // In version 2, this section of the file starts with the root level data - // block index. We call a function that writes intermediate-level blocks - // first, then root level, and returns the offset of the root level block - // index. - - long rootIndexOffset = dataBlockIndexWriter.writeIndexBlocks(outputStream); - trailer.setLoadOnOpenOffset(rootIndexOffset); - - // Meta block index. - metaBlockIndexWriter.writeSingleLevelIndex(fsBlockWriter.startWriting( - BlockType.ROOT_INDEX), "meta"); - fsBlockWriter.writeHeaderAndData(outputStream); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - - if (this.hFileContext.isIncludesMvcc()) { - appendFileInfo(MAX_MEMSTORE_TS_KEY, Bytes.toBytes(maxMemstoreTS)); - appendFileInfo(KEY_VALUE_VERSION, Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE)); - } - - // File info - writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO)); - fsBlockWriter.writeHeaderAndData(outputStream); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - - // Load-on-open data supplied by higher levels, e.g. Bloom filters. - for (BlockWritable w : additionalLoadOnOpenData){ - fsBlockWriter.writeBlock(w, outputStream); - totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader(); - } - - // Now finish off the trailer. 
- trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels()); - trailer.setUncompressedDataIndexSize( - dataBlockIndexWriter.getTotalUncompressedSize()); - trailer.setFirstDataBlockOffset(firstDataBlockOffset); - trailer.setLastDataBlockOffset(lastDataBlockOffset); - trailer.setComparatorClass(comparator.getClass()); - trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries()); - - - finishClose(trailer); - - fsBlockWriter.release(); - } - - @Override - public void addInlineBlockWriter(InlineBlockWriter ibw) { - inlineBlockWriters.add(ibw); - } - - @Override - public void addGeneralBloomFilter(final BloomFilterWriter bfw) { - this.addBloomFilter(bfw, BlockType.GENERAL_BLOOM_META); - } - - @Override - public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) { - this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META); - } - - private void addBloomFilter(final BloomFilterWriter bfw, - final BlockType blockType) { - if (bfw.getKeyCount() <= 0) - return; - - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - "is not supported"); - } - additionalLoadOnOpenData.add(new BlockWritable() { - @Override - public BlockType getBlockType() { - return blockType; - } - - @Override - public void writeToBlock(DataOutput out) throws IOException { - bfw.getMetaWriter().write(out); - Writable dataWriter = bfw.getDataWriter(); - if (dataWriter != null) - dataWriter.write(out); - } - }); - } - - protected int getMajorVersion() { - return 2; - } - - protected int getMinorVersion() { - return HFileReaderV2.MAX_MINOR_VERSION; - } - - @Override - public HFileContext getFileContext() { - return hFileContext; - } -} http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java deleted file mode 100644 index 086395c..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.hfile; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.io.hfile.HFile.Writer; -import org.apache.hadoop.hbase.security.EncryptionUtil; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * {@link HFile} writer for version 3. - */ -@InterfaceAudience.Private -public class HFileWriterV3 extends HFileWriterV2 { - - private static final Log LOG = LogFactory.getLog(HFileWriterV3.class); - - private int maxTagsLength = 0; - - static class WriterFactoryV3 extends HFile.WriterFactory { - WriterFactoryV3(Configuration conf, CacheConfig cacheConf) { - super(conf, cacheConf); - } - - @Override - public Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, HFileContext fileContext) - throws IOException { - return new HFileWriterV3(conf, cacheConf, fs, path, ostream, comparator, fileContext); - } - } - - /** Constructor that takes a path, creates and closes the output stream. */ - public HFileWriterV3(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, - FSDataOutputStream ostream, final KVComparator comparator, - final HFileContext fileContext) throws IOException { - super(conf, cacheConf, fs, path, ostream, comparator, fileContext); - if (LOG.isTraceEnabled()) { - LOG.trace("Writer" + (path != null ? " for " + path : "") + - " initialized with cacheConf: " + cacheConf + - " comparator: " + comparator.getClass().getSimpleName() + - " fileContext: " + fileContext); - } - } - - /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell - * Cell to add. Cannot be empty nor null. - * @throws IOException - */ - @Override - public void append(final Cell cell) throws IOException { - // Currently get the complete arrays - super.append(cell); - int tagsLength = cell.getTagsLength(); - if (tagsLength > this.maxTagsLength) { - this.maxTagsLength = tagsLength; - } - } - - protected void finishFileInfo() throws IOException { - super.finishFileInfo(); - if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) { - // In case of Prefix Tree encoding, we always write tags information into HFiles even if all - // KVs are having no tags. 
- fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); - } else if (hFileContext.isIncludesTags()) { - // When tags are not being written in this file, MAX_TAGS_LEN is excluded - // from the FileInfo - fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); - boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE) - && hFileContext.isCompressTags(); - fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false); - } - } - - @Override - protected int getMajorVersion() { - return 3; - } - - @Override - protected int getMinorVersion() { - return HFileReaderV3.MAX_MINOR_VERSION; - } - - @Override - protected void finishClose(FixedFileTrailer trailer) throws IOException { - // Write out encryption metadata before finalizing if we have a valid crypto context - Encryption.Context cryptoContext = hFileContext.getEncryptionContext(); - if (cryptoContext != Encryption.Context.NONE) { - // Wrap the context's key and write it as the encryption metadata, the wrapper includes - // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), - cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()), - cryptoContext.getKey())); - } - // Now we can finish the close - super.finishClose(trailer); - } - -} http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index f2d5c6f..26ae097 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -129,7 +129,7 @@ public class HFileOutputFormat2 // Invented config. Add to hbase-*.xml if other than default compression. 
final String defaultCompressionStr = conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); - final Algorithm defaultCompression = AbstractHFileWriter + final Algorithm defaultCompression = HFileWriterImpl .compressionByName(defaultCompressionStr); final boolean compactionExclude = conf.getBoolean( "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); @@ -483,7 +483,7 @@ public class HFileOutputFormat2 Map compressionMap = new TreeMap(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { - Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue()); + Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); } return compressionMap; http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index d4354b0..a515f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -93,6 +93,7 @@ import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @@ -493,6 +494,7 @@ public class HRegionServer extends HasThread implements throws IOException { this.fsOk = true; this.conf = conf; + HFile.checkHFileVersion(this.conf); checkCodecs(this.conf); this.userProvider = UserProvider.instantiate(conf); FSUtils.setupShortCircuitRead(this.conf); http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 8910042..9a33b64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -588,7 +588,7 @@ public interface Region extends ConfigurationObserver { byte[] now) throws IOException; /** - * Replace any cell timestamps set to HConstants#LATEST_TIMESTAMP with the + * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP} * provided current timestamp. 
* @param values * @param now http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index c1a6b76..345dd9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; @@ -409,7 +408,7 @@ public class StoreFile { } this.reader.setSequenceID(this.sequenceid); - b = metadataMap.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY); + b = metadataMap.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (b != null) { this.maxMemstoreTS = Bytes.toLong(b); } http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 3c3ea6b..ae820b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState; @@ -142,7 +142,7 @@ public abstract class Compactor { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID()); } else { - tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY); + tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (tmp != null) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp)); } http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index cdef12f..a9cc1c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import 
org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -120,7 +120,7 @@ public class CompressionTest { throws Exception { Configuration conf = HBaseConfiguration.create(); HFileContext context = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(codec)).build(); + .withCompression(HFileWriterImpl.compressionByName(codec)).build(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) .withPath(fs, path) .withFileContext(context) http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index ea10f60..cb12bea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -326,7 +326,7 @@ public class HFilePerformanceEvaluation { void setUp() throws Exception { HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(codec)) + .withCompression(HFileWriterImpl.compressionByName(codec)) .withBlockSize(RFILE_BLOCKSIZE); if (cipher == "aes") { http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 00639cf..0622f55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -242,7 +242,6 @@ public class TestCacheOnWrite { public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); this.conf.set("dfs.datanode.data.dir.perm", "700"); - conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE); conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE); @@ -272,12 +271,7 @@ public class TestCacheOnWrite { } private void readStoreFile(boolean useTags) throws IOException { - AbstractHFileReader reader; - if (useTags) { - reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf, conf); - } else { - 
reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf, conf); - } + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf); LOG.info("HFile information: " + reader); HFileContext meta = new HFileContextBuilder().withCompression(compress) .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) @@ -378,11 +372,6 @@ public class TestCacheOnWrite { } private void writeStoreFile(boolean useTags) throws IOException { - if(useTags) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - } else { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2); - } Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "test_cache_on_write"); HFileContext meta = new HFileContextBuilder().withCompression(compress) @@ -422,11 +411,6 @@ public class TestCacheOnWrite { private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException, InterruptedException { - if (useTags) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - } else { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2); - } // TODO: need to change this test if we add a cache size threshold for // compactions, or if we implement some other kind of intelligent logic for // deciding what blocks to cache-on-write on compaction. http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 1b6731a..97784cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -55,7 +55,7 @@ public class TestFixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * The number of used fields by version. Indexed by version minus two. + * The number of used fields by version. Indexed by version minus two. * Min version that we support is V2 */ private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14, 15 }; @@ -89,8 +89,8 @@ public class TestFixedFileTrailer { @Test public void testTrailer() throws IOException { - FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderV2.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t = new FixedFileTrailer(version, + HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); @@ -122,8 +122,8 @@ public class TestFixedFileTrailer { // Finished writing, trying to read. { DataInputStream dis = new DataInputStream(bais); - FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderV2.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t2 = new FixedFileTrailer(version, + HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. 
checkLoadedTrailer(version, t, t2); @@ -167,12 +167,12 @@ public class TestFixedFileTrailer { trailerStr.split(", ").length); assertEquals(trailerStr, t4.toString()); } - + @Test public void testTrailerForV2NonPBCompatibility() throws Exception { if (version == 2) { FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderV2.MINOR_VERSION_NO_CHECKSUM); + HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); t.setLastDataBlockOffset(291); @@ -199,7 +199,7 @@ public class TestFixedFileTrailer { { DataInputStream dis = new DataInputStream(bais); FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderV2.MINOR_VERSION_NO_CHECKSUM); + HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. checkLoadedTrailer(version, t, t2); @@ -228,7 +228,7 @@ public class TestFixedFileTrailer { output.writeInt(FixedFileTrailer.materializeVersion(fft.getMajorVersion(), fft.getMinorVersion())); } - + private FixedFileTrailer readTrailer(Path trailerPath) throws IOException { FSDataInputStream fsdis = fs.open(trailerPath); http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 7625842..cf2aca5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -82,8 +82,6 @@ public class TestForceCacheImportantBlocks { public static Collection parameters() { // HFile versions return Arrays.asList( - new Object[] { 2, true }, - new Object[] { 2, false }, new Object[] { 3, true }, new Object[] { 3, false } ); http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 3855629..9e4b1c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -246,7 +246,7 @@ public class TestHFile extends HBaseTestCase { FSDataOutputStream fout = createFSOutput(ncTFile); HFileContext meta = new HFileContextBuilder() .withBlockSize(minBlockSize) - .withCompression(AbstractHFileWriter.compressionByName(codec)) + .withCompression(HFileWriterImpl.compressionByName(codec)) .build(); Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) @@ -339,7 +339,7 @@ public class TestHFile extends HBaseTestCase { Path mFile = new Path(ROOT_DIR, "meta.hfile"); FSDataOutputStream fout = createFSOutput(mFile); HFileContext meta = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(compress)) + .withCompression(HFileWriterImpl.compressionByName(compress)) .withBlockSize(minBlockSize).build(); Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) 
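Taken together, the hunks in this patch make the same caller-side change everywhere: test code stops casting to the version-specific classes (AbstractHFileWriter, HFileWriterV2/V3, HFileReaderV2/V3) and works against the HFile.Writer and HFile.Reader interfaces instead, with the compression lookup moved to HFileWriterImpl.compressionByName(). A minimal sketch of the resulting write-then-read flow, assuming the same calls exercised by the tests above (the class name, file path, codec name, and cell values here are illustrative only, not taken from the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
  import org.apache.hadoop.hbase.io.hfile.HFile;
  import org.apache.hadoop.hbase.io.hfile.HFileContext;
  import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
  import org.apache.hadoop.hbase.io.hfile.HFileScanner;
  import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
  import org.apache.hadoop.hbase.util.Bytes;

  public class HFileApiSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      CacheConfig cacheConf = new CacheConfig(conf);
      FileSystem fs = FileSystem.get(conf);
      Path path = new Path("/tmp/sketch.hfile");  // illustrative path, not from the patch

      // Compression lookup now lives on HFileWriterImpl rather than AbstractHFileWriter.
      HFileContext context = new HFileContextBuilder()
          .withBlockSize(4096)
          .withCompression(HFileWriterImpl.compressionByName("none"))
          .build();

      // Writers come from the factory; no cast to HFileWriterV2/V3 is needed.
      HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
          .withPath(fs, path)
          .withFileContext(context)
          .create();
      // append(Cell) is assumed here as the write entry point, as in the tests above.
      writer.append(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes("v")));
      writer.close();

      // Readers come back as the HFile.Reader interface; no cast to HFileReaderV2/V3.
      HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
      HFileScanner scanner = reader.getScanner(true, true);
      scanner.seekTo();  // position at the first cell of the file
      reader.close();
    }
  }

The read side follows the same pattern in the remaining hunks: HFile.createReader() hands back an HFile.Reader, and methods that callers previously reached only through a concrete reader cast (for example prefetchComplete() in TestPrefetch) are now used through the interface.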
http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 939c019..0ee9d14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -590,7 +590,7 @@ public class TestHFileBlockIndex { } // Manually compute the mid-key and validate it. - HFileReaderV2 reader2 = (HFileReaderV2) reader; + HFile.Reader reader2 = reader; HFileBlock.FSReader fsReader = reader2.getUncachedBlockReader(); HFileBlock.BlockIterator iter = fsReader.blockRange(0, http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index c0683f8..ab811f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -55,8 +55,7 @@ public class TestHFileInlineToRootChunkConversion { CacheConfig cacheConf = new CacheConfig(conf); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); - HFileWriterV2 hfw = - (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf) + HFile.Writer hfw = new HFileWriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create(); List keys = new ArrayList(); @@ -78,7 +77,7 @@ public class TestHFileInlineToRootChunkConversion { } hfw.close(); - HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf, conf); + HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, conf); // Scanner doesn't do Cells yet. Fix. 
HFileScanner scanner = reader.getScanner(true, true); for (int i = 0; i < keys.size(); ++i) { @@ -86,4 +85,4 @@ public class TestHFileInlineToRootChunkConversion { } reader.close(); } -} +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 76a8200..26adb49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -130,7 +130,7 @@ public class TestHFileSeek extends TestCase { try { HFileContext context = new HFileContextBuilder() .withBlockSize(options.minBlockSize) - .withCompression(AbstractHFileWriter.compressionByName(options.compress)) + .withCompression(HFileWriterImpl.compressionByName(options.compress)) .build(); Writer writer = HFile.getWriterFactoryNoCache(conf) .withOutputStream(fout) http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index 42e918a..ca063bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -56,7 +56,7 @@ import org.junit.experimental.categories.Category; /** * Testing writing a version 2 {@link HFile}. This is a low-level test written - * during the development of {@link HFileWriterV2}. + * during the development of {@link HFileWriterImpl}. 
*/ @Category({IOTests.class, SmallTests.class}) public class TestHFileWriterV2 { @@ -99,8 +99,7 @@ public class TestHFileWriterV2 { .withBlockSize(4096) .withCompression(compressAlgo) .build(); - HFileWriterV2 writer = (HFileWriterV2) - new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf)) + HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context) .create(); @@ -136,7 +135,6 @@ public class TestHFileWriterV2 { FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize); - assertEquals(2, trailer.getMajorVersion()); assertEquals(entryCount, trailer.getEntryCount()); HFileContext meta = new HFileContextBuilder() @@ -177,8 +175,7 @@ public class TestHFileWriterV2 { // File info FileInfo fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get( - HFileWriterV2.KEY_VALUE_VERSION); + byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index f96e8ef..2ca9273 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -60,8 +60,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; /** - * Testing writing a version 3 {@link HFile}. This is a low-level test written - * during the development of {@link HFileWriterV3}. + * Testing writing a version 3 {@link HFile}. 
*/ @RunWith(Parameterized.class) @Category({IOTests.class, SmallTests.class}) @@ -120,8 +119,7 @@ public class TestHFileWriterV3 { .withBlockSize(4096) .withIncludesTags(useTags) .withCompression(compressAlgo).build(); - HFileWriterV3 writer = (HFileWriterV3) - new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf)) + HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context) .withComparator(KeyValue.COMPARATOR) @@ -206,8 +204,7 @@ public class TestHFileWriterV3 { // File info FileInfo fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get( - HFileWriterV3.KEY_VALUE_VERSION); + byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index 2fd3684..0067417 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -89,8 +89,7 @@ public class TestLazyDataBlockDecompression { */ private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path, HFileContext cxt, int entryCount) throws IOException { - HFileWriterV2 writer = (HFileWriterV2) - new HFileWriterV2.WriterFactoryV2(conf, cc) + HFile.Writer writer = new HFileWriterFactory(conf, cc) .withPath(fs, path) .withFileContext(cxt) .create(); @@ -118,7 +117,7 @@ public class TestLazyDataBlockDecompression { long fileSize = fs.getFileStatus(path).getLen(); FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - HFileReaderV2 reader = new HFileReaderV2(path, trailer, fsdis, fileSize, cacheConfig, + HFile.Reader reader = new HFileReaderImpl(path, trailer, fsdis, fileSize, cacheConfig, fsdis.getHfs(), conf); reader.loadFileInfo(); long offset = trailer.getFirstDataBlockOffset(), http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 4ceafb4..6a12616 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -55,7 +55,6 @@ public class TestPrefetch { @Before public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); - conf.setInt(HFile.FORMAT_VERSION_KEY, 3); conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); fs = HFileSystem.get(conf); CacheConfig.blockCacheDisabled = false; @@ -70,10 +69,9 @@ public class TestPrefetch { private void readStoreFile(Path storeFilePath) throws Exception { // Open the file - HFileReaderV2 reader = 
(HFileReaderV2) HFile.createReader(fs, - storeFilePath, cacheConf, conf); + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf); - while (!((HFileReaderV3)reader).prefetchComplete()) { + while (!reader.prefetchComplete()) { // Sleep for a bit Thread.sleep(1000); } http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 3a0fdf7..9d7de02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test {@link HFileScanner#reseekTo(byte[])} + * Test {@link HFileScanner#reseekTo(org.apache.hadoop.hbase.Cell)} */ @Category({IOTests.class, SmallTests.class}) public class TestReseekTo { http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index b9a126f..69bc09d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -73,11 +73,6 @@ public class TestSeekTo extends HBaseTestCase { Path makeNewFile(TagUsage tagUsage) throws IOException { Path ncTFile = new Path(testDir, "basic.hfile"); - if (tagUsage != TagUsage.NO_TAG) { - conf.setInt("hfile.format.version", 3); - } else { - conf.setInt("hfile.format.version", 2); - } FSDataOutputStream fout = this.fs.create(ncTFile); int blocksize = toKV("a", tagUsage).getLength() * 3; HFileContext context = new HFileContextBuilder().withBlockSize(blocksize) @@ -142,7 +137,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testSeekBeforeWithReSeekTo() throws Exception { - testSeekBeforeWithReSeekToInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testSeekBeforeWithReSeekToInternals(TagUsage.ONLY_TAG); testSeekBeforeWithReSeekToInternals(TagUsage.PARTIAL_TAG); } @@ -232,7 +227,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testSeekTo() throws Exception { - testSeekToInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testSeekToInternals(TagUsage.ONLY_TAG); testSeekToInternals(TagUsage.PARTIAL_TAG); } @@ -262,7 +257,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testBlockContainingKey() throws Exception { - testBlockContainingKeyInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testBlockContainingKeyInternals(TagUsage.ONLY_TAG); testBlockContainingKeyInternals(TagUsage.PARTIAL_TAG); } http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 1927334..6544c72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; +import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.Compressor; @@ -602,8 +602,9 @@ public class DataBlockEncodingTool { // run the utilities DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName); int majorVersion = reader.getHFileVersion(); - comp.useHBaseChecksum = majorVersion > 2 - || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM); + comp.useHBaseChecksum = majorVersion > 2 || + (majorVersion == 2 && + reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM); comp.checkStatistics(scanner, kvLimit); if (doVerify) { comp.verifyCodecs(scanner, kvLimit); http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a2961/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index dc142d6..b7ebd23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileBlock; -import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; import org.apache.hadoop.hbase.wal.DefaultWALProvider; @@ -221,7 +220,7 @@ public class TestCacheOnWriteInSchema { BlockCache cache = cacheConf.getBlockCache(); StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL); - HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader(); + HFile.Reader reader = sf.createReader().getHFileReader(); try { // Open a scanner with (on read) caching disabled HFileScanner scanner = reader.getScanner(false, false);