Subject: svn commit: r791693 - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io/hfile/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/regionserver/
Date: Tue, 07 Jul 2009 03:44:40 -0000
To: hbase-commits@hadoop.apache.org
From: stack@apache.org
Reply-To: hbase-dev@hadoop.apache.org
Message-Id: <20090707034440.B968F2388876@eris.apache.org>

Author: stack
Date: Tue Jul  7 03:44:40 2009
New Revision: 791693

URL: http://svn.apache.org/viewvc?rev=791693&view=rev
Log:
HBASE-1615 HBASE-1597 introduced a bug when compacting after a split

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jul  7 03:44:40 2009
@@ -242,6 +242,8 @@
    HBASE-1595  hadoop-default.xml and zoo.cfg in hbase jar
    HBASE-1602  HRegionServer won't go down since we added in new LruBlockCache
    HBASE-1608  TestCachedBlockQueue failing on some jvms (Jon Gray via Stack)
+   HBASE-1615  HBASE-1597 introduced a bug when compacting after a split
+               (Jon Gray via Stack)

  IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
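The one-line log message compresses a real mechanism. HBASE-1597 had compactions read through HFile.CompactionReader, a Reader subclass that wrapped an existing reader by copying its fields (see the HFile.java hunks below). A store file produced by a region split, though, is read through HalfHFileReader, whose serve-only-half-of-the-parent-file behavior lives in overridden methods, and copying fields into a plain Reader silently drops those overrides. The fix deletes the wrapper and instead threads a cacheBlocks flag through getScanner(), which HalfHFileReader can override like any other method. A toy sketch of the override-losing pattern; the class names echo the diff but everything else here is hypothetical:

    import java.util.Arrays;
    import java.util.List;

    // Stand-in for HFile.Reader: a full reader sees every key in the file.
    class Reader {
      List<String> keys() {
        return Arrays.asList("a", "b", "c", "d");
      }
    }

    // Stand-in for HalfHFileReader: a post-split file exposes only one
    // half of the parent file, implemented as an override.
    class HalfReader extends Reader {
      @Override
      List<String> keys() {
        return Arrays.asList("a", "b"); // bottom half only
      }
    }

    // Stand-in for the deleted CompactionReader: extends Reader and copies
    // state from the wrapped reader, so HalfReader's override is lost.
    class CompactionReader extends Reader {
      CompactionReader(Reader wrapped) {
        // field copying elided; no override comes along with the fields
      }
    }

    public class SplitCompactionBug {
      public static void main(String[] args) {
        Reader half = new HalfReader();
        System.out.println("direct:  " + half.keys());                       // [a, b]
        System.out.println("wrapped: " + new CompactionReader(half).keys()); // [a, b, c, d]
      }
    }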
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java Tue Jul  7 03:44:40 2009
@@ -80,7 +80,12 @@

   @Override
   public HFileScanner getScanner() {
-    final HFileScanner s = super.getScanner();
+    return this.getScanner(true);
+  }
+
+  @Override
+  public HFileScanner getScanner(boolean cacheBlocks) {
+    final HFileScanner s = super.getScanner(cacheBlocks);
     return new HFileScanner() {
       final HFileScanner delegate = s;
       public boolean atEnd = false;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Tue Jul  7 03:44:40 2009
@@ -671,7 +671,7 @@
    */
   @SuppressWarnings("unused")
   private Reader() throws IOException {
-    this(null, null, null, false);
+    this(null, -1, null, false);
   }

   /**
@@ -706,7 +706,7 @@
     this.fileSize = size;
     this.istream = fsdis;
     this.closeIStream = false;
-    this.name = this.istream.toString();
+    this.name = this.istream == null? "": this.istream.toString();
     this.inMemory = inMemory;
   }

@@ -817,8 +817,20 @@
    * @return Scanner on this file.
    */
   public HFileScanner getScanner() {
-    return new Scanner(this);
+    return new Scanner(this, true);
   }
+
+  /**
+   * Create a Scanner on this file.  No seeks or reads are done on creation.
+   * Call {@link HFileScanner#seekTo(byte[])} to position and start the read.
+   * There is nothing to clean up in a Scanner.  Letting go of your references
+   * to the scanner is sufficient.
+   * @return Scanner on this file.
+   */
+  public HFileScanner getScanner(boolean cacheBlocks) {
+    return new Scanner(this, cacheBlocks);
+  }
+
   /**
    * @param key Key to search.
    * @return Block number of the block containing the key or -1 if not in this
@@ -873,7 +885,7 @@
    * @return Block wrapped in a ByteBuffer.
    * @throws IOException
    */
-  ByteBuffer readBlock(int block) throws IOException {
+  ByteBuffer readBlock(int block, boolean cacheBlock) throws IOException {
     if (blockIndex == null) {
       throw new IOException("Block index not loaded");
     }
@@ -926,25 +938,13 @@
       buf.rewind();

       // Cache the block
-      cacheBlock(name + block, buf.duplicate());
+      if(cacheBlock && cache != null) {
+        cache.cacheBlock(name + block, buf.duplicate(), inMemory);
+      }

       return buf;
     }
   }
-
-  /**
-   * Cache this block if there is a block cache available.
-   *
-   * Makes a copy of the ByteBuffer, not the one we are sending back, so the
-   * position does not get messed up.
-   * @param blockName
-   * @param buf
-   */
-  void cacheBlock(String blockName, ByteBuffer buf) {
-    if (cache != null) {
-      cache.cacheBlock(blockName, buf.duplicate(), inMemory);
-    }
-  }

   /*
    * Decompress compressedSize bytes off the backing
@@ -1041,6 +1041,8 @@
     private final Reader reader;
     private ByteBuffer block;
     private int currBlock;
+
+    private boolean cacheBlocks = false;

     private int currKeyLen = 0;
     private int currValueLen = 0;
@@ -1051,6 +1053,11 @@
       this.reader = r;
     }

+    public Scanner(Reader r, boolean cacheBlocks) {
+      this.reader = r;
+      this.cacheBlocks = cacheBlocks;
+    }
+
     public KeyValue getKeyValue() {
       if(this.block == null) {
         return null;
@@ -1099,7 +1106,7 @@
           block = null;
           return false;
         }
-        block = reader.readBlock(currBlock);
+        block = reader.readBlock(currBlock, cacheBlocks);
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
         blockFetches++;
@@ -1230,7 +1237,7 @@
         currValueLen = block.getInt();
       }
       currBlock = 0;
-      block = reader.readBlock(currBlock);
+      block = reader.readBlock(currBlock, cacheBlocks);
       currKeyLen = block.getInt();
       currValueLen = block.getInt();
       blockFetches++;
@@ -1239,12 +1246,12 @@

     private void loadBlock(int bloc) throws IOException {
       if (block == null) {
-        block = reader.readBlock(bloc);
+        block = reader.readBlock(bloc, cacheBlocks);
         currBlock = bloc;
         blockFetches++;
       } else {
         if (bloc != currBlock) {
-          block = reader.readBlock(bloc);
+          block = reader.readBlock(bloc, cacheBlocks);
           currBlock = bloc;
           blockFetches++;
         } else {
@@ -1260,35 +1267,6 @@
       }
     }
   }
-
-  /**
-   * HFile Reader that does not cache blocks that were not already cached.
-   *
-   * Used for compactions.
-   */
-  public static class CompactionReader extends Reader {
-    public CompactionReader(Reader reader) {
-      super(reader.istream, reader.fileSize, reader.cache, reader.inMemory);
-      super.blockIndex = reader.blockIndex;
-      super.trailer = reader.trailer;
-      super.lastkey = reader.lastkey;
-      super.avgKeyLen = reader.avgKeyLen;
-      super.avgValueLen = reader.avgValueLen;
-      super.comparator = reader.comparator;
-      super.metaIndex = reader.metaIndex;
-      super.fileInfoLoaded = reader.fileInfoLoaded;
-      super.compressAlgo = reader.compressAlgo;
-    }
-
-    /**
-     * Do not cache this block when doing a compaction.
-     */
-    @Override
-    void cacheBlock(String blockName, ByteBuffer buf) {
-      return;
-    }
-  }

   /*
    * The RFile has a fixed trailer which contains offsets to other variable
    * parts of the file.  Also includes basic metadata on this file.
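Taken together, the HFile.java hunks move the cache-or-not decision from the reader's type into the scanner: readBlock() now takes a cacheBlock flag, Scanner carries it, and the two getScanner() variants merely choose its value. A minimal caller-side sketch against the patched API; countKeys and isCompaction are illustrative names, and seekTo() returning a boolean is assumed from this era's HFileScanner interface (next() is used exactly as TestCompaction does below):

    import java.io.IOException;

    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class ScannerCacheChoice {
      // Walk every key/value in the file. User-facing reads keep warming
      // the block cache; a compaction touches each block exactly once, so
      // it passes cacheBlocks=false and leaves the cache alone.
      static int countKeys(HFile.Reader reader, boolean isCompaction) throws IOException {
        HFileScanner scanner = reader.getScanner(!isCompaction);
        if (!scanner.seekTo()) {
          return 0; // empty file
        }
        int count = 0;
        do {
          count++;
        } while (scanner.next());
        return count;
      }
    }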

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Tue Jul  7 03:44:40 2009
@@ -54,7 +54,6 @@
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -681,12 +680,12 @@
         LOG.warn("Path is null for " + file);
         return null;
       }
-      CompactionReader r = file.getCompactionReader();
+      Reader r = file.getReader();
       if (r == null) {
         LOG.warn("StoreFile " + file + " has a null Reader");
         continue;
       }
-      long len = file.getCompactionReader().length();
+      long len = file.getReader().length();
       fileSizes[i] = len;
       totalSize += len;
     }
@@ -845,12 +844,13 @@
     // init:
     for (int i = 0; i < filesToCompact.size(); ++i) {
       // TODO open a new HFile.Reader w/o block cache.
-      CompactionReader r = filesToCompact.get(i).getCompactionReader();
+      Reader r = filesToCompact.get(i).getReader();
       if (r == null) {
         LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
         continue;
       }
-      scanners[i] = new StoreFileScanner(r.getScanner());
+      // Instantiate HFile.Reader.Scanner to not cache blocks
+      scanners[i] = new StoreFileScanner(r.getScanner(false));
     }

     if (majorCompaction) {
@@ -960,7 +960,7 @@
     // 4. Compute new store size
     this.storeSize = 0L;
     for (StoreFile hsf : this.storefiles.values()) {
-      Reader r = hsf.getCompactionReader();
+      Reader r = hsf.getReader();
       if (r == null) {
         LOG.warn("StoreFile " + hsf + " has a null Reader");
         continue;
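The new Store.java comment states the what ("Instantiate HFile.Reader.Scanner to not cache blocks"); the why is presumably cache pollution: a compaction streams over every block of its input files exactly once, and routing that stream through the LRU would evict blocks that live reads keep re-reading. A toy demonstration of the eviction effect, using a plain access-ordered LinkedHashMap rather than HBase's LruBlockCache:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CompactionCachePollution {
      public static void main(String[] args) {
        final int capacity = 4;
        // Access-ordered map that evicts its eldest entry past capacity:
        // the smallest possible LRU cache.
        Map<String, String> lru = new LinkedHashMap<String, String>(16, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
            return size() > capacity;
          }
        };
        lru.put("hot-1", "block");  // blocks user reads keep hitting
        lru.put("hot-2", "block");
        // A compaction-style scan that caches every block it reads...
        for (int block = 0; block < 8; block++) {
          lru.put("scan-" + block, "block");
        }
        // ...has pushed the hot blocks out entirely.
        System.out.println(lru.keySet()); // [scan-4, scan-5, scan-6, scan-7]
      }
    }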
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Tue Jul  7 03:44:40 2009
@@ -310,14 +310,6 @@
   }

   /**
-   * Gets a special Reader for use during compactions.  Will not cache blocks.
-   * @return Current reader.  Must call open first else returns null.
-   */
-  public HFile.CompactionReader getCompactionReader() {
-    return new HFile.CompactionReader(this.reader);
-  }
-
-  /**
    * @throws IOException
    */
   public synchronized void close() throws IOException {

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=791693&r1=791692&r2=791693&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Jul  7 03:44:40 2009
@@ -169,7 +169,7 @@
     boolean containsStartRow = false;
     for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
         values()) {
-      HFileScanner scanner = f.getCompactionReader().getScanner();
+      HFileScanner scanner = f.getReader().getScanner(false);
       scanner.seekTo();
       do {
         byte [] row = scanner.getKeyValue().getRow();
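For code outside this tree that still calls the deleted accessor, the migration is mechanical. A sketch, where storeFile stands for any opened StoreFile and getReader() is assumed accessible to the caller the same way Store.java uses it above:

    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class CompactionScannerMigration {
      static HFileScanner openForCompaction(StoreFile storeFile) {
        // Before r791693: storeFile.getCompactionReader().getScanner()
        // After: one Reader type; the boolean decides block-cache use.
        HFile.Reader r = storeFile.getReader();
        return r.getScanner(false); // false = do not cache blocks
      }
    }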