From: tedyu@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Subject: svn commit: r1439753 - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/ test/java/org/apache/hadoop/hbase/io/encoding/ test/java/org/apache/hadoop/hbase/io/hfile/ test/java/or...
Date: Tue, 29 Jan 2013 06:03:59 -0000
Message-Id: <20130129060400.2FDDD2388900@eris.apache.org>
Mailing-List: contact commits-help@hbase.apache.org; run by ezmlm
X-Mailer: svnmailer-1.0.8-patched
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

Author: tedyu
Date: Tue Jan 29 06:03:59 2013
New Revision: 1439753

URL: http://svn.apache.org/viewvc?rev=1439753&view=rev
Log:
HBASE-7660 Remove HFileV1 code (Ted Yu)

Removed:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestUpgradeFromHFileV1ToEncoding.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderV1.java
Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Jan 29 06:03:59 2013
@@ -165,6 +165,9 @@ public class HFile {
   public final static String DEFAULT_COMPRESSION =
     DEFAULT_COMPRESSION_ALGORITHM.getName();
 
+  /** Meta data block name for bloom filter bits. */
+  public static final String BLOOM_FILTER_DATA_KEY = "BLOOM_FILTER_DATA";
+
   /**
    * We assume that HFile path ends with
    * ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at least this
@@ -447,8 +450,6 @@ public class HFile {
       CacheConfig cacheConf) {
     int version = getFormatVersion(conf);
     switch (version) {
-    case 1:
-      return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
     case 2:
       return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
     default:
@@ -557,9 +558,6 @@ public class HFile {
       throw new CorruptHFileException("Problem reading HFile Trailer from file "
          + path, iae);
     }
     switch (trailer.getMajorVersion()) {
-    case 1:
-      return new HFileReaderV1(path, trailer, fsdis, size, closeIStream,
-          cacheConf);
     case 2:
       return new HFileReaderV2(path, trailer, fsdis, fsdisNoFsChecksum, size, closeIStream,
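With the version 1 branches gone, the writer and reader factories in HFile.java dispatch on major version 2 only; any other configured version now falls through to the default branch. A minimal, self-contained sketch of the remaining dispatch shape (illustrative only, not the actual HBase factory code):

    // Standalone illustration of the dispatch left behind by this commit:
    // only HFile major version 2 is accepted, everything else is rejected.
    public class VersionDispatchSketch {
      static String pickReader(int majorVersion) {
        switch (majorVersion) {
          case 2:
            return "HFileReaderV2";
          default:
            throw new IllegalArgumentException(
                "Unsupported HFile major version: " + majorVersion);
        }
      }

      public static void main(String[] args) {
        System.out.println(pickReader(2));  // prints HFileReaderV2
        // pickReader(1) now fails, since HFileReaderV1/HFileWriterV1 are gone.
      }
    }
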
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java Tue Jan 29 06:03:59 2013
@@ -1317,110 +1317,6 @@ public class HFileBlock implements Cache
   }
 
   /**
-   * Reads version 1 blocks from the file system. In version 1 blocks,
-   * everything is compressed, including the magic record, if compression is
-   * enabled. Everything might be uncompressed if no compression is used. This
-   * reader returns blocks represented in the uniform version 2 format in
-   * memory.
-   */
-  static class FSReaderV1 extends AbstractFSReader {
-
-    /** Header size difference between version 1 and 2 */
-    private static final int HEADER_DELTA = HEADER_SIZE_NO_CHECKSUM -
-        MAGIC_LENGTH;
-
-    public FSReaderV1(FSDataInputStream istream, Algorithm compressAlgo,
-        long fileSize) throws IOException {
-      super(istream, istream, compressAlgo, fileSize, 0, null, null);
-    }
-
-    /**
-     * Read a version 1 block. There is no uncompressed header, and the block
-     * type (the magic record) is part of the compressed data. This
-     * implementation assumes that the bounded range file input stream is
-     * needed to stop the decompressor reading into next block, because the
-     * decompressor just grabs a bunch of data without regard to whether it is
-     * coming to end of the compressed section.
-     *
-     * The block returned is still a version 2 block, and in particular, its
-     * first {@link #HEADER_SIZE} bytes contain a valid version 2 header.
-     *
-     * @param offset the offset of the block to read in the file
-     * @param onDiskSizeWithMagic the on-disk size of the version 1 block,
-     *          including the magic record, which is the part of compressed
-     *          data if using compression
-     * @param uncompressedSizeWithMagic uncompressed size of the version 1
-     *          block, including the magic record
-     */
-    @Override
-    public HFileBlock readBlockData(long offset, long onDiskSizeWithMagic,
-        int uncompressedSizeWithMagic, boolean pread) throws IOException {
-      if (uncompressedSizeWithMagic <= 0) {
-        throw new IOException("Invalid uncompressedSize="
-            + uncompressedSizeWithMagic + " for a version 1 block");
-      }
-
-      if (onDiskSizeWithMagic <= 0 || onDiskSizeWithMagic >= Integer.MAX_VALUE)
-      {
-        throw new IOException("Invalid onDiskSize=" + onDiskSizeWithMagic
-            + " (maximum allowed: " + Integer.MAX_VALUE + ")");
-      }
-
-      int onDiskSize = (int) onDiskSizeWithMagic;
-
-      if (uncompressedSizeWithMagic < MAGIC_LENGTH) {
-        throw new IOException("Uncompressed size for a version 1 block is "
-            + uncompressedSizeWithMagic + " but must be at least "
-            + MAGIC_LENGTH);
-      }
-
-      // The existing size already includes magic size, and we are inserting
-      // a version 2 header.
-      ByteBuffer buf = ByteBuffer.allocate(uncompressedSizeWithMagic
-          + HEADER_DELTA);
-
-      int onDiskSizeWithoutHeader;
-      if (compressAlgo == Compression.Algorithm.NONE) {
-        // A special case when there is no compression.
-        if (onDiskSize != uncompressedSizeWithMagic) {
-          throw new IOException("onDiskSize=" + onDiskSize
-              + " and uncompressedSize=" + uncompressedSizeWithMagic
-              + " must be equal for version 1 with no compression");
-        }
-
-        // The first MAGIC_LENGTH bytes of what this will read will be
-        // overwritten.
-        readAtOffset(istream, buf.array(), buf.arrayOffset() + HEADER_DELTA,
-            onDiskSize, false, offset, pread);
-
-        onDiskSizeWithoutHeader = uncompressedSizeWithMagic - MAGIC_LENGTH;
-      } else {
-        InputStream bufferedBoundedStream = createBufferedBoundedStream(
-            offset, onDiskSize, pread);
-        Compression.decompress(buf.array(), buf.arrayOffset()
-            + HEADER_DELTA, bufferedBoundedStream, onDiskSize,
-            uncompressedSizeWithMagic, this.compressAlgo);
-
-        // We don't really have a good way to exclude the "magic record" size
-        // from the compressed block's size, since it is compressed as well.
-        onDiskSizeWithoutHeader = onDiskSize;
-      }
-
-      BlockType newBlockType = BlockType.parse(buf.array(), buf.arrayOffset()
-          + HEADER_DELTA, MAGIC_LENGTH);
-
-      // We set the uncompressed size of the new HFile block we are creating
-      // to the size of the data portion of the block without the magic record,
-      // since the magic record gets moved to the header.
-      HFileBlock b = new HFileBlock(newBlockType, onDiskSizeWithoutHeader,
-          uncompressedSizeWithMagic - MAGIC_LENGTH, -1L, buf, FILL_HEADER,
-          offset, MemStore.NO_PERSISTENT_TS, 0, 0, ChecksumType.NULL.getCode(),
-          onDiskSizeWithoutHeader + HEADER_SIZE_NO_CHECKSUM);
-      return b;
-    }
-  }
-
-  /**
    * We always prefetch the header of the next block, so that we know its
    * on-disk size in advance and can read it in one operation.
    */
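The removed FSReaderV1 re-wrapped each version 1 block as a version 2 block in memory: it allocated room for the payload plus the header-size difference, wrote the payload starting at that offset, and let the old magic record occupy the tail of the spot where the new header is filled in. A small arithmetic sketch of that layout, with assumed constants (8-byte magic record, 24-byte checksum-free v2 header) that are not taken from the diff itself:

    // Illustrative only: mirrors the buffer arithmetic of the removed FSReaderV1.
    public class V1ToV2LayoutSketch {
      static final int MAGIC_LENGTH = 8;              // assumed magic record size
      static final int HEADER_SIZE_NO_CHECKSUM = 24;  // assumed v2 header size
      static final int HEADER_DELTA = HEADER_SIZE_NO_CHECKSUM - MAGIC_LENGTH;

      public static void main(String[] args) {
        int uncompressedSizeWithMagic = 64 * 1024;    // example v1 block payload
        // Allocate payload + extra header bytes and write the payload at
        // HEADER_DELTA, so the old magic record lands in the last MAGIC_LENGTH
        // bytes of the region later overwritten by the v2 header.
        int bufferSize = uncompressedSizeWithMagic + HEADER_DELTA;
        int payloadOffset = HEADER_DELTA;
        int v2UncompressedSize = uncompressedSizeWithMagic - MAGIC_LENGTH;
        System.out.println("buffer size          = " + bufferSize);
        System.out.println("payload written at   = " + payloadOffset);
        System.out.println("v2 uncompressed size = " + v2UncompressedSize);
      }
    }
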
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Jan 29 06:03:59 2013
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.HFileWriterV1;
 import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
@@ -1535,7 +1534,7 @@ public class StoreFile {
         bloom = null;
         shouldCheckBloom = true;
       } else {
-        bloom = reader.getMetaBlock(HFileWriterV1.BLOOM_FILTER_DATA_KEY,
+        bloom = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY,
             true);
         shouldCheckBloom = bloom != null;
       }
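Because HFileWriterV1 no longer exists, the bloom filter meta block key moves to HFile and callers such as StoreFile reference it there. A rough sketch of a caller using the relocated constant; it assumes HFile.Reader exposes getMetaBlock(String, boolean) returning a ByteBuffer (the call site above implies it can be null when no bloom data is present), and the helper itself is illustrative rather than HBase code:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class BloomMetaKeySketch {
      /** Returns the raw bloom filter bits meta block, or null if absent. */
      static ByteBuffer readBloomData(HFile.Reader reader) throws IOException {
        // Before HBASE-7660 this constant was HFileWriterV1.BLOOM_FILTER_DATA_KEY.
        return reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, true /* cacheBlock */);
      }
    }
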
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java Tue Jan 29 06:03:59 2013
@@ -80,8 +80,6 @@ public class TestForceCacheImportantBloc
   public static Collection parameters() {
     // HFile versions
     return Arrays.asList(new Object[][] {
-        new Object[] { new Integer(1), false },
-        new Object[] { new Integer(1), true },
         new Object[] { new Integer(2), false },
         new Object[] { new Integer(2), true }
     });

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java Tue Jan 29 06:03:59 2013
@@ -258,41 +258,6 @@ public class TestHFileBlock {
   }
 
   @Test
-  public void testReaderV1() throws IOException {
-    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
-      for (boolean pread : new boolean[] { false, true }) {
-        byte[] block = createTestV1Block(algo);
-        Path path = new Path(TEST_UTIL.getDataTestDir(),
-            "blocks_v1_"+ algo);
-        LOG.info("Creating temporary file at " + path);
-        FSDataOutputStream os = fs.create(path);
-        int totalSize = 0;
-        int numBlocks = 50;
-        for (int i = 0; i < numBlocks; ++i) {
-          os.write(block);
-          totalSize += block.length;
-        }
-        os.close();
-
-        FSDataInputStream is = fs.open(path);
-        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
-            totalSize);
-        HFileBlock b;
-        int numBlocksRead = 0;
-        long pos = 0;
-        while (pos < totalSize) {
-          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
-          b.sanityCheck();
-          pos += block.length;
-          numBlocksRead++;
-        }
-        assertEquals(numBlocks, numBlocksRead);
-        is.close();
-      }
-    }
-  }
-
-  @Test
   public void testReaderV2() throws IOException {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java Tue Jan 29 06:03:59 2013
@@ -169,41 +169,6 @@ public class TestHFileBlockCompatibility
   }
 
   @Test
-  public void testReaderV1() throws IOException {
-    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
-      for (boolean pread : new boolean[] { false, true }) {
-        byte[] block = createTestV1Block(algo);
-        Path path = new Path(TEST_UTIL.getDataTestDir(),
-            "blocks_v1_"+ algo);
-        LOG.info("Creating temporary file at " + path);
-        FSDataOutputStream os = fs.create(path);
-        int totalSize = 0;
-        int numBlocks = 50;
-        for (int i = 0; i < numBlocks; ++i) {
-          os.write(block);
-          totalSize += block.length;
-        }
-        os.close();
-
-        FSDataInputStream is = fs.open(path);
-        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
-            totalSize);
-        HFileBlock b;
-        int numBlocksRead = 0;
-        long pos = 0;
-        while (pos < totalSize) {
-          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
-          b.sanityCheck();
-          pos += block.length;
-          numBlocksRead++;
-        }
-        assertEquals(numBlocks, numBlocksRead);
-        is.close();
-      }
-    }
-  }
-
-  @Test
   public void testReaderV2() throws IOException {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : new boolean[] { false, true }) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Tue Jan 29 06:03:59 2013
@@ -202,21 +202,6 @@ public class TestSplitTransaction {
     assertFalse(st.prepare());
   }
 
-  @Test public void testWholesomeSplitWithHFileV1() throws IOException {
-    int defaultVersion = TEST_UTIL.getConfiguration().getInt(
-        HFile.FORMAT_VERSION_KEY, 2);
-    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 1);
-    try {
-      for (Store store : this.parent.stores.values()) {
-        store.getFamily().setBloomFilterType(BloomType.ROW);
-      }
-      testWholesomeSplit();
-    } finally {
-      TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY,
-          defaultVersion);
-    }
-  }
-
   @Test public void testWholesomeSplit() throws IOException {
     final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
     assertTrue(rowcount > 0);
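The removed tests forced the HFile format version (HFile.FORMAT_VERSION_KEY) to 1 before exercising splits and block reads; with the v1 code gone, only version 2 remains a valid setting. A small configuration sketch, assuming the usual HBaseConfiguration entry point and using only the key already referenced in the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class FormatVersionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Only major version 2 is supported after HBASE-7660; setting 1 here
        // would hit the default branch of the HFile reader/writer factories.
        conf.setInt(HFile.FORMAT_VERSION_KEY, 2);
        System.out.println(HFile.FORMAT_VERSION_KEY + " = "
            + conf.getInt(HFile.FORMAT_VERSION_KEY, 2));
      }
    }
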
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1439753&r1=1439752&r2=1439753&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Jan 29 06:03:59 2013
@@ -587,60 +587,6 @@ public class TestStoreFile extends HBase
     }
   }
 
-  public void testBloomEdgeCases() throws Exception {
-    float err = (float)0.005;
-    FileSystem fs = FileSystem.getLocal(conf);
-    Path f = new Path(ROOT_DIR, getName());
-    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
-    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
-    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS, 1000);
-
-    // This test only runs for HFile format version 1.
-    conf.setInt(HFile.FORMAT_VERSION_KEY, 1);
-
-    // this should not create a bloom because the max keys is too small
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(BloomType.ROW)
-            .withMaxKeyCount(2000)
-            .withChecksumType(CKTYPE)
-            .withBytesPerChecksum(CKBYTES)
-            .build();
-    assertFalse(writer.hasGeneralBloom());
-    writer.close();
-    fs.delete(f, true);
-
-    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS,
-        Integer.MAX_VALUE);
-
-    // TODO: commented out because we run out of java heap space on trunk
-    // the below config caused IllegalArgumentException in our production cluster
-    // however, the resulting byteSize is < MAX_INT, so this should work properly
-    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(BloomType.ROW)
-            .withMaxKeyCount(27244696)
-            .build();
-    assertTrue(writer.hasGeneralBloom());
-    bloomWriteRead(writer, fs);
-
-    // this, however, is too large and should not create a bloom
-    // because Java can't create a contiguous array > MAX_INT
-    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(BloomType.ROW)
-            .withMaxKeyCount(Integer.MAX_VALUE)
-            .withChecksumType(CKTYPE)
-            .withBytesPerChecksum(CKBYTES)
-            .build();
-    assertFalse(writer.hasGeneralBloom());
-    writer.close();
-    fs.delete(f, true);
-  }
-
   public void testSeqIdComparator() {
     assertOrdering(StoreFile.Comparators.SEQ_ID,
         mockStoreFile(true, 1000, -1, "/foo/123"),