Subject: svn commit: r1445918 [24/29] - in /hbase/branches/hbase-7290: ./ bin/ conf/ dev-support/ hbase-client/ hbase-common/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/ hbase-common/src/ma...
Date: Wed, 13 Feb 2013 20:58:32 -0000
To: commits@hbase.apache.org
From: jmhsieh@apache.org
Message-Id: <20130213205855.0C1762388C6D@eris.apache.org>

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java?rev=1445918&r1=1445917&r2=1445918&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java Wed Feb 13 20:58:23 2013
@@ -93,11 +93,11 @@ public class TestAtomicOperation extends
     a.setReturnResults(false);
     a.add(fam1, qual1, Bytes.toBytes(v1));
     a.add(fam1, qual2, Bytes.toBytes(v2));
-    assertNull(region.append(a, null, true));
+    assertNull(region.append(a, true));
     a = new Append(row);
     a.add(fam1, qual1, Bytes.toBytes(v2));
     a.add(fam1, qual2, Bytes.toBytes(v1));
-    Result result = region.append(a, null, true);
+    Result result = region.append(a, true);
     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1+v2), result.getValue(fam1, qual1)));
     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2+v1), result.getValue(fam1, qual2)));
   }
@@ -150,7 +150,7 @@ public class TestAtomicOperation extends
     // run a get and see?
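// (All hunks in this file apply one mechanical migration: the explicit
// row-lock argument has been dropped from HRegion's data methods, so call
// sites simply lose the null lock parameter. A minimal sketch of the new
// call shapes, reusing this test's own region and fields:
//   region.append(a, true);       // was: region.append(a, null, true)
//   region.increment(inc, true);  // was: region.increment(inc, null, true)
//   Result r = region.get(get);   // was: region.get(get, null)
// The trailing boolean is carried over unchanged from the old signature.)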
Get get = new Get(row); get.addColumn(familiy, qualifier); - Result result = region.get(get, null); + Result result = region.get(get); assertEquals(1, result.size()); KeyValue kv = result.raw()[0]; @@ -210,11 +210,11 @@ public class TestAtomicOperation extends inc.addColumn(fam1, qual1, amount); inc.addColumn(fam1, qual2, amount*2); inc.addColumn(fam2, qual3, amount*3); - region.increment(inc, null, true); + region.increment(inc, true); // verify: Make sure we only see completed increments Get g = new Get(row); - Result result = region.get(g, null); + Result result = region.get(g); assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2))); assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3))); } catch (IOException e) { @@ -246,10 +246,10 @@ public class TestAtomicOperation extends a.add(fam1, qual1, val); a.add(fam1, qual2, val); a.add(fam2, qual3, val); - region.append(a, null, true); + region.append(a, true); Get g = new Get(row); - Result result = region.get(g, null); + Result result = region.get(g); assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length); assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length); } catch (IOException e) { @@ -276,7 +276,7 @@ public class TestAtomicOperation extends } assertEquals(0, failures.get()); Get g = new Get(row); - Result result = region.get(g, null); + Result result = region.get(g); assertEquals(result.getValue(fam1, qual1).length, 10000); assertEquals(result.getValue(fam1, qual2).length, 10000); assertEquals(result.getValue(fam2, qual3).length, 10000); @@ -336,7 +336,7 @@ public class TestAtomicOperation extends op ^= true; // check: should always see exactly one column Get g = new Get(row); - Result r = region.get(g, null); + Result r = region.get(g); if (r.size() != 1) { LOG.debug(r); failures.incrementAndGet(); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java Wed Feb 13 20:58:23 2013 @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.client.Sc import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.junit.Test; @@ -89,7 +88,7 @@ public class TestBlocksRead extends HBas * @param tableName * @param callingMethod * @param conf - * @param families + * @param family * @throws IOException * @return created and initialized region. 
*/ @@ -158,7 +157,7 @@ public class TestBlocksRead extends HBas get.addColumn(cf, Bytes.toBytes(column)); } - kvs = region.get(get, null).raw(); + kvs = region.get(get).raw(); long blocksEnd = getBlkAccessCount(cf); if (expBlocks[i] != -1) { assertEquals("Blocks Read Check for Bloom: " + bloomType, expBlocks[i], @@ -189,7 +188,7 @@ public class TestBlocksRead extends HBas del.deleteFamily(Bytes.toBytes(family + "_ROWCOL"), version); del.deleteFamily(Bytes.toBytes(family + "_ROW"), version); del.deleteFamily(Bytes.toBytes(family + "_NONE"), version); - region.delete(del, null, true); + region.delete(del, true); } private static void verifyData(KeyValue kv, String expectedRow, Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java Wed Feb 13 20:58:23 2013 @@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; @@ -49,11 +48,9 @@ import org.apache.hadoop.hbase.io.hfile. import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.util.Bytes; - import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -64,7 +61,7 @@ import org.junit.runners.Parameterized.P /** * Tests {@link HFile} cache-on-write functionality for data blocks, non-root - * index blocks, and Bloom filter blocks, as specified by the column family. + * index blocks, and Bloom filter blocks, as specified by the column family. 
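 * A minimal sketch of how a family would opt in to cache-on-write -- these
 * HColumnDescriptor setters are an assumption here, since the code that
 * toggles the schema falls outside this hunk:
 *   HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
 *   hcd.setCacheDataOnWrite(true);    // cache data blocks as they are written
 *   hcd.setCacheIndexesOnWrite(true); // cache non-root index blocks
 *   hcd.setCacheBloomsOnWrite(true);  // cache Bloom filter blocks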
*/ @RunWith(Parameterized.class) @Category(MediumTests.class) @@ -121,7 +118,9 @@ public class TestCacheOnWriteInSchema { private final CacheOnWriteType cowType; private Configuration conf; private final String testDescription; + private HRegion region; private HStore store; + private HLog hlog; private FileSystem fs; public TestCacheOnWriteInSchema(CacheOnWriteType cowType) { @@ -163,18 +162,35 @@ public class TestCacheOnWriteInSchema { fs.delete(logdir, true); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); - HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf); - - HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null); + hlog = HLogFactory.createHLog(fs, basedir, logName, conf); + + region = new HRegion(basedir, hlog, fs, conf, info, htd, null); store = new HStore(basedir, region, hcd, fs, conf); } @After - public void tearDown() { + public void tearDown() throws IOException { + IOException ex = null; + try { + region.close(); + } catch (IOException e) { + LOG.warn("Caught Exception", e); + ex = e; + } + try { + hlog.closeAndDelete(); + } catch (IOException e) { + LOG.warn("Caught Exception", e); + ex = e; + } try { fs.delete(new Path(DIR), true); } catch (IOException e) { LOG.error("Could not delete " + DIR, e); + ex = e; + } + if (ex != null) { + throw ex; } } @@ -190,7 +206,7 @@ public class TestCacheOnWriteInSchema { } private void readStoreFile(Path path) throws IOException { - CacheConfig cacheConf = store.getCacheConfig(); + CacheConfig cacheConf = store.getCacheConfig(); BlockCache cache = cacheConf.getBlockCache(); StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, null); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java Wed Feb 13 20:58:23 2013 @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.cell.CellComparator; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -149,7 +150,7 @@ public class TestColumnSeeking { while (scanner.next(results)) ; assertEquals(kvSet.size(), results.size()); - assertTrue(results.containsAll(kvSet)); + assertTrue(KeyValueTestUtil.containsIgnoreMvccVersion(results, kvSet)); } } finally { HRegion.closeHRegion(region); @@ -260,7 +261,7 @@ public class TestColumnSeeking { while (scanner.next(results)) ; assertEquals(kvSet.size(), results.size()); - assertTrue(results.containsAll(kvSet)); + assertTrue(KeyValueTestUtil.containsIgnoreMvccVersion(results, kvSet)); } HRegion.closeHRegion(region); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1445918&r1=1445917&r2=1445918&view=diff 
============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Wed Feb 13 20:58:23 2013 @@ -127,7 +127,7 @@ public class TestCompaction extends HBas do { List results = new ArrayList(); boolean result = s.next(results); - r.delete(new Delete(results.get(0).getRow()), null, false); + r.delete(new Delete(results.get(0).getRow()), false); if (!result) break; } while(true); s.close(); @@ -199,7 +199,7 @@ public class TestCompaction extends HBas // Default is that there only 3 (MAXVERSIONS) versions allowed per column. // // Assert == 3 when we ask for versions. - Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null); + Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); // see if CompactionProgress is in place but null @@ -229,7 +229,7 @@ public class TestCompaction extends HBas // Always 3 versions if that is what max versions is. result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT). - setMaxVersions(100), null); + setMaxVersions(100)); LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " + "initial compaction: " + result); assertEquals("Invalid number of versions of row " @@ -242,32 +242,32 @@ public class TestCompaction extends HBas // should result in a compacted store file that has no references to the // deleted row. LOG.debug("Adding deletes to memstore and flushing"); - Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null); + Delete delete = new Delete(secondRowBytes, System.currentTimeMillis()); byte [][] famAndQf = {COLUMN_FAMILY, null}; delete.deleteFamily(famAndQf[0]); - r.delete(delete, null, true); + r.delete(delete, true); // Assert deleted. - result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null ); + result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should have been deleted", result.isEmpty()); r.flushcache(); - result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null ); + result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should have been deleted", result.isEmpty()); // Add a bit of data and flush. Start adding at 'bbb'. createSmallerStoreFile(this.r); r.flushcache(); // Assert that the second row is still deleted. - result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null ); + result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should still be deleted", result.isEmpty()); // Force major compaction. r.compactStores(true); assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1); - result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100), null ); + result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should still be deleted", result.isEmpty()); // Make sure the store files do have some 'aaa' keys in them -- exactly 3. 
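The next hunk swaps direct access to the package-private scanInfo field for the getScanInfo()/setScanInfo() accessors. The test uses the new accessors to shrink the family TTL so every existing cell expires before the compaction runs; a condensed sketch of that pattern, built only from calls visible in the hunk:

    final int ttl = 1000;  // ms; cells older than this become invisible to scans
    HStore.ScanInfo old = store.getScanInfo();
    store.setScanInfo(new HStore.ScanInfo(old.getFamily(), old.getMinVersions(),
        old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator()));
    Thread.sleep(ttl);     // let every existing cell age past the new TTL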
@@ -280,11 +280,11 @@ public class TestCompaction extends HBas final int ttl = 1000; for (Store hstore : this.r.stores.values()) { HStore store = ((HStore) hstore); - HStore.ScanInfo old = store.scanInfo; + HStore.ScanInfo old = store.getScanInfo(); HStore.ScanInfo si = new HStore.ScanInfo(old.getFamily(), old.getMinVersions(), old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator()); - store.scanInfo = si; + store.setScanInfo(si); } Thread.sleep(1000); @@ -301,7 +301,7 @@ public class TestCompaction extends HBas conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct); HStore s = ((HStore) r.getStore(COLUMN_FAMILY)); - s.compactionPolicy.updateConfiguration(conf, s); + s.compactionPolicy.setConf(conf); try { createStoreFile(r); createStoreFile(r); @@ -313,7 +313,7 @@ public class TestCompaction extends HBas assertEquals(2, s.getStorefilesCount()); // ensure that major compaction time is deterministic - CompactionPolicy c = s.compactionPolicy; + DefaultCompactionPolicy c = (DefaultCompactionPolicy)s.compactionPolicy; List storeFiles = s.getStorefiles(); long mcTime = c.getNextMajorCompactTime(storeFiles); for (int i = 0; i < 10; ++i) { @@ -406,22 +406,22 @@ public class TestCompaction extends HBas r.flushcache(); } - Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null); + Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); - result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null); + result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); // Now add deletes to memstore and then flush it. That will put us over // the compaction threshold of 3 store files. Compacting these store files // should result in a compacted store file that has no references to the // deleted row. 
- r.delete(delete, null, true); + r.delete(delete, true); // Make sure that we have only deleted family2 from secondRowBytes - result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null); + result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100)); assertEquals(expectedResultsAfterDelete, result.size()); // but we still have firstrow - result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null); + result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); r.flushcache(); @@ -429,10 +429,10 @@ public class TestCompaction extends HBas // Let us check again // Make sure that we have only deleted family2 from secondRowBytes - result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null); + result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100)); assertEquals(expectedResultsAfterDelete, result.size()); // but we still have firstrow - result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null); + result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); // do a compaction @@ -447,10 +447,10 @@ public class TestCompaction extends HBas assertTrue("Was not supposed to be a major compaction", numFiles2 > 1); // Make sure that we have only deleted family2 from secondRowBytes - result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100), null); + result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100)); assertEquals(expectedResultsAfterDelete, result.size()); // but we still have firstrow - result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100), null); + result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); } @@ -530,7 +530,7 @@ public class TestCompaction extends HBas Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i))); byte [][] famAndQf = {COLUMN_FAMILY, null}; delete.deleteFamily(famAndQf[0]); - r.delete(delete, null, true); + r.delete(delete, true); } r.flushcache(); @@ -539,11 +539,11 @@ public class TestCompaction extends HBas final int ttl = 1000; for (Store hstore: this.r.stores.values()) { HStore store = (HStore)hstore; - HStore.ScanInfo old = store.scanInfo; + HStore.ScanInfo old = store.getScanInfo(); HStore.ScanInfo si = new HStore.ScanInfo(old.getFamily(), old.getMinVersions(), old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator()); - store.scanInfo = si; + store.setScanInfo(si); } Thread.sleep(ttl); @@ -588,15 +588,15 @@ public class TestCompaction extends HBas HStore store = (HStore) r.getStore(COLUMN_FAMILY); List storeFiles = store.getStorefiles(); - long maxId = StoreFile.getMaxSequenceIdInList(storeFiles, true); - Compactor tool = new Compactor(this.conf); + Compactor tool = store.compactionPolicy.getCompactor(); - StoreFile.Writer compactedFile = - tool.compact(store, storeFiles, false, maxId); + List newFiles = + tool.compact(storeFiles, false); // Now lets corrupt the compacted file. 
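// (The corruption below re-creates the freshly compacted file and overwrites
// it with a short garbage payload, so the completeCompaction call that
// follows is expected to fail and leave the bad file in the 'tmp' directory
// instead of moving it into the store.)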
FileSystem fs = FileSystem.get(conf); - Path origPath = compactedFile.getPath(); + // default compaction policy created one and only one new compacted file + Path origPath = newFiles.get(0); Path homedir = store.getHomedir(); Path dstPath = new Path(homedir, origPath.getName()); FSDataOutputStream stream = fs.create(origPath, null, true, 512, (short) 3, @@ -606,7 +606,7 @@ public class TestCompaction extends HBas stream.close(); try { - store.completeCompaction(storeFiles, compactedFile); + store.completeCompaction(storeFiles, origPath); } catch (Exception e) { // The complete compaction should fail and the corrupt file should remain // in the 'tmp' directory; Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java Wed Feb 13 20:58:23 2013 @@ -167,14 +167,11 @@ public class TestCompactionState { assertEquals(CompactionState.NONE, state); } } else { - curt = System.currentTimeMillis(); - waitTime = 20000; - endt = curt + waitTime; + // Wait until the compaction is done state = admin.getCompactionState(table); while (state != CompactionState.NONE && curt < endt) { Thread.sleep(10); state = admin.getCompactionState(table); - curt = System.currentTimeMillis(); } // Now, compaction should be done. assertEquals(CompactionState.NONE, state); @@ -233,6 +230,5 @@ public class TestCompactionState { TEST_UTIL.flush(); puts.clear(); } - } - + } } Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java Wed Feb 13 20:58:23 2013 @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.io.hfile. 
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.ByteBloomFilter; import org.apache.hadoop.hbase.util.Bytes; Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java Wed Feb 13 20:58:23 2013 @@ -25,21 +25,25 @@ import java.util.GregorianCalendar; import java.util.List; import junit.framework.TestCase; -import org.junit.experimental.categories.Category; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; -import org.apache.hadoop.hbase.regionserver.compactions.*; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.After; +import org.junit.experimental.categories.Category; import com.google.common.collect.Lists; @@ -53,7 +57,6 @@ public class TestDefaultCompactSelection private static final String DIR= TEST_UTIL.getDataTestDir(TestDefaultCompactSelection.class.getSimpleName()).toString(); private static Path TEST_FILE; - private CompactionPolicy manager; protected static final int minFiles = 3; protected static final int maxFiles = 5; @@ -61,6 +64,8 @@ public class TestDefaultCompactSelection protected static final long minSize = 10; protected static final long maxSize = 1000; + private HLog hlog; + private HRegion region; @Override public void setUp() throws Exception { @@ -77,7 +82,6 @@ public class TestDefaultCompactSelection Path basedir = new Path(DIR); String logName = "logs"; Path logdir = new Path(DIR, logName); - Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family")); FileSystem fs = FileSystem.get(conf); @@ -87,20 +91,39 @@ public class TestDefaultCompactSelection htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); - HLog hlog = HLogFactory.createHLog(fs, basedir, + hlog = HLogFactory.createHLog(fs, basedir, logName, conf); - HRegion region = HRegion.createHRegion(info, basedir, conf, htd); + region = 
HRegion.createHRegion(info, basedir, conf, htd); HRegion.closeHRegion(region); Path tableDir = new Path(basedir, Bytes.toString(htd.getName())); region = new HRegion(tableDir, hlog, fs, conf, info, htd, null); store = new HStore(basedir, region, hcd, fs, conf); - manager = store.compactionPolicy; TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir()); fs.create(TEST_FILE); } + @After + public void tearDown() throws IOException { + IOException ex = null; + try { + region.close(); + } catch (IOException e) { + LOG.warn("Caught Exception", e); + ex = e; + } + try { + hlog.closeAndDelete(); + } catch (IOException e) { + LOG.warn("Caught Exception", e); + ex = e; + } + if (ex != null) { + throw ex; + } + } + // used so our tests don't deal with actual StoreFiles static class MockStoreFile extends StoreFile { long length = 0; @@ -255,7 +278,7 @@ public class TestDefaultCompactSelection compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12); conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1); conf.setFloat("hbase.hregion.majorcompaction.jitter", 0); - store.compactionPolicy.updateConfiguration(conf, store); + store.compactionPolicy.updateConfiguration(); try { // trigger an aged major compaction compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12); @@ -286,7 +309,7 @@ public class TestDefaultCompactSelection * current compaction algorithm. Developed to ensure that refactoring * doesn't implicitly alter this. */ - long tooBig = maxSize + 1; + //long tooBig = maxSize + 1; Calendar calendar = new GregorianCalendar(); int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY); @@ -307,13 +330,13 @@ public class TestDefaultCompactSelection this.conf.setLong("hbase.offpeak.end.hour", hourPlusOne); LOG.debug("Testing compact selection with off-peak settings (" + hourMinusOne + ", " + hourPlusOne + ")"); - store.compactionPolicy.updateConfiguration(this.conf, store); + store.compactionPolicy.updateConfiguration(); compactEquals(sfCreate(999, 50, 12, 12, 1), 50, 12, 12, 1); // set peak hour outside current selection and check compact selection this.conf.setLong("hbase.offpeak.start.hour", hourMinusTwo); this.conf.setLong("hbase.offpeak.end.hour", hourMinusOne); - store.compactionPolicy.updateConfiguration(this.conf, store); + store.compactionPolicy.updateConfiguration(); LOG.debug("Testing compact selection with off-peak settings (" + hourMinusTwo + ", " + hourMinusOne + ")"); compactEquals(sfCreate(999,50,12,12, 1), 12, 12, 1); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Wed Feb 13 20:58:23 2013 @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.fs.HFileS import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assume; import org.junit.Test; @@ -81,7 +80,7 @@ public class 
TestFSErrorsExposed { writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); StoreFile sf = new StoreFile(fs, writer.getPath(), - util.getConfiguration(), cacheConf, StoreFile.BloomType.NONE, + util.getConfiguration(), cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE); StoreFile.Reader reader = sf.createReader(); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java Wed Feb 13 20:58:23 2013 @@ -112,7 +112,7 @@ public class TestGetClosestAtOrBefore ex try { List keys = new ArrayList(); while (s.next(keys)) { - mr.delete(new Delete(keys.get(0).getRow()), null, false); + mr.delete(new Delete(keys.get(0).getRow()), false); keys.clear(); } } finally { @@ -207,7 +207,7 @@ public class TestGetClosestAtOrBefore ex Delete d = new Delete(T20); d.deleteColumn(c0, c0); - region.delete(d, null, false); + region.delete(d, false); r = region.getClosestRowBefore(T20, c0); assertTrue(Bytes.equals(T10, r.getRow())); @@ -221,7 +221,7 @@ public class TestGetClosestAtOrBefore ex d = new Delete(T30); d.deleteColumn(c0, c0); - region.delete(d, null, false); + region.delete(d, false); r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); @@ -257,7 +257,7 @@ public class TestGetClosestAtOrBefore ex // in memory; make sure we get back t10 again. d = new Delete(T20); d.deleteColumn(c1, c1); - region.delete(d, null, false); + region.delete(d, false); r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHBase7051.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHBase7051.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHBase7051.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHBase7051.java Wed Feb 13 20:58:23 2013 @@ -16,6 +16,9 @@ import org.apache.hadoop.hbase.HRegionIn import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.MultithreadedTestUtil; +import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; @@ -30,19 +33,32 @@ import org.junit.experimental.categories import com.google.common.collect.Lists; +/** + * Test of HBASE-7051; that checkAndPuts and puts behave atomically with respect to each other. 
+ * Rather than perform a bunch of trials to verify atomicity, this test recreates a race condition + * that causes the test to fail if checkAndPut doesn't wait for outstanding put transactions + * to complete. It does this by invasively overriding HRegion function to affect the timing of + * the operations. + */ @Category(SmallTests.class) public class TestHBase7051 { - private static volatile boolean putCompleted = false; private static CountDownLatch latch = new CountDownLatch(1); - private boolean checkAndPutCompleted = false; - private static int count = 0; - + private enum TestStep { + INIT, // initial put of 10 to set value of the cell + PUT_STARTED, // began doing a put of 50 to cell + PUT_COMPLETED, // put complete (released RowLock, but may not have advanced MVCC). + CHECKANDPUT_STARTED, // began checkAndPut: if 10 -> 11 + CHECKANDPUT_COMPLETED // completed checkAndPut + // NOTE: at the end of these steps, the value of the cell should be 50, not 11! + } + private static volatile TestStep testStep = TestStep.INIT; + private final String family = "f1"; + @Test public void testPutAndCheckAndPutInParallel() throws Exception { final String tableName = "testPutAndCheckAndPut"; - final String family = "f1"; Configuration conf = HBaseConfiguration.create(); conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class); final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName), @@ -57,14 +73,16 @@ public class TestHBase7051 { putsAndLocks.add(pair); - count++; region.batchMutate(putsAndLocks.toArray(new Pair[0])); - makeCheckAndPut(family, region); - - makePut(family, region); - while (!checkAndPutCompleted) { + MultithreadedTestUtil.TestContext ctx = + new MultithreadedTestUtil.TestContext(conf); + ctx.addThread(new PutThread(ctx, region)); + ctx.addThread(new CheckAndPutThread(ctx, region)); + ctx.startThreads(); + while (testStep != TestStep.CHECKANDPUT_COMPLETED) { Thread.sleep(100); } + ctx.stop(); Scan s = new Scan(); RegionScanner scanner = region.getScanner(s); List results = new ArrayList(); @@ -75,54 +93,46 @@ public class TestHBase7051 { } - private void makePut(final String family, final MockHRegion region) { - new Thread() { - public void run() { - List> putsAndLocks = Lists.newArrayList(); - Put[] puts = new Put[1]; - Put put = new Put(Bytes.toBytes("r1")); - put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50")); - puts[0] = put; - try { - Pair pair = new Pair(puts[0], null); - putsAndLocks.add(pair); - count++; - region.batchMutate(putsAndLocks.toArray(new Pair[0])); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - }.start(); - } + private class PutThread extends TestThread { + private MockHRegion region; + PutThread(TestContext ctx, MockHRegion region) { + super(ctx); + this.region = region; + } - private void makeCheckAndPut(final String family, final MockHRegion region) { - new Thread() { + public void doWork() throws Exception { + List> putsAndLocks = Lists.newArrayList(); + Put[] puts = new Put[1]; + Put put = new Put(Bytes.toBytes("r1")); + put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50")); + puts[0] = put; + Pair pair = new Pair(puts[0], null); + putsAndLocks.add(pair); + testStep = TestStep.PUT_STARTED; + region.batchMutate(putsAndLocks.toArray(new Pair[0])); + } + } - public void run() { - Put[] puts = new Put[1]; - Put put = new Put(Bytes.toBytes("r1")); - put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11")); - 
puts[0] = put; - try { - while (putCompleted == false) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - count++; - region.checkAndMutate(Bytes.toBytes("r1"), Bytes.toBytes(family), Bytes.toBytes("q1"), - CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("10")), put, null, true); - checkAndPutCompleted = true; - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - }.start(); + private class CheckAndPutThread extends TestThread { + private MockHRegion region; + CheckAndPutThread(TestContext ctx, MockHRegion region) { + super(ctx); + this.region = region; + } + + public void doWork() throws Exception { + Put[] puts = new Put[1]; + Put put = new Put(Bytes.toBytes("r1")); + put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11")); + puts[0] = put; + while (testStep != TestStep.PUT_COMPLETED) { + Thread.sleep(100); + } + testStep = TestStep.CHECKANDPUT_STARTED; + region.checkAndMutate(Bytes.toBytes("r1"), Bytes.toBytes(family), Bytes.toBytes("q1"), + CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("10")), put, true); + testStep = TestStep.CHECKANDPUT_COMPLETED; + } } public static class MockHRegion extends HRegion { @@ -134,36 +144,39 @@ public class TestHBase7051 { @Override public void releaseRowLock(Integer lockId) { - if (count == 1) { + if (testStep == TestStep.INIT) { super.releaseRowLock(lockId); return; } - if (count == 2) { + if (testStep == TestStep.PUT_STARTED) { try { - putCompleted = true; + testStep = TestStep.PUT_COMPLETED; super.releaseRowLock(lockId); + // put has been written to the memstore and the row lock has been released, but the + // MVCC has not been advanced. Prior to fixing HBASE-7051, the following order of + // operations would cause the non-atomicity to show up: + // 1) Put releases row lock (where we are now) + // 2) CheckAndPut grabs row lock and reads the value prior to the put (10) + // because the MVCC has not advanced + // 3) Put advances MVCC + // So, in order to recreate this order, we wait for the checkAndPut to grab the rowLock + // (see below), and then wait some more to give the checkAndPut time to read the old + // value. 
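+ // A condensed timeline of the interleaving this override manufactures
+ // (names are the TestStep values declared above):
+ //   PUT_STARTED         : the put of 50 reaches the memstore; the row lock is released right here
+ //   CHECKANDPUT_STARTED : checkAndPut grabs the row lock before the put's MVCC transaction has completed
+ // With the HBASE-7051 fix, checkAndPut waits out the put's MVCC, reads 50,
+ // its compare-against-10 fails, and 11 is never written -- hence the final
+ // cell value asserted by the test is "50".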
latch.await(); + Thread.sleep(1000); } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); + Thread.currentThread().interrupt(); } } - if (count == 3) { + else if (testStep == TestStep.CHECKANDPUT_STARTED) { super.releaseRowLock(lockId); - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - latch.countDown(); } } @Override public Integer getLock(Integer lockid, byte[] row, boolean waitForLock) throws IOException { - if (count == 3) { + if (testStep == TestStep.CHECKANDPUT_STARTED) { latch.countDown(); } return super.getLock(lockid, row, waitForLock); Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1445918&r1=1445917&r2=1445918&view=diff ============================================================================== --- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original) +++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Wed Feb 13 20:58:23 2013 @@ -67,15 +67,16 @@ import org.apache.hadoop.hbase.filter.Bi import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.NullComparator; import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -89,6 +90,7 @@ import org.apache.hadoop.hbase.util.Incr import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.cell.CellComparator; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -168,7 +170,7 @@ public class TestHRegion extends HBaseTe RegionScanner scanner1 = region.getScanner(scan); Delete delete = new Delete(Bytes.toBytes("r1")); - region.delete(delete, null, false); + region.delete(delete, false); region.flushcache(); // open the second scanner @@ -199,7 +201,7 @@ public class TestHRegion extends HBaseTe System.out.println(results); assertEquals(0, results.size()); } - + @Test public void testToShowNPEOnRegionScannerReseek() throws Exception{ String method = "testToShowNPEOnRegionScannerReseek"; @@ -273,7 +275,7 @@ public class TestHRegion extends HBaseTe long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); Get get = new Get(row); - Result result = region.get(get, null); + Result result = region.get(get); for (long i = minSeqId; 
i <= maxSeqId; i += 10) { List kvs = result.getColumn(family, Bytes.toBytes(i)); assertEquals(1, kvs.size()); @@ -326,7 +328,7 @@ public class TestHRegion extends HBaseTe long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); Get get = new Get(row); - Result result = region.get(get, null); + Result result = region.get(get); for (long i = minSeqId; i <= maxSeqId; i += 10) { List kvs = result.getColumn(family, Bytes.toBytes(i)); if (i < recoverSeqId) { @@ -460,7 +462,7 @@ public class TestHRegion extends HBaseTe public void run() { while (!this.done.get()) { try { - assertTrue(region.get(g, null).size() > 0); + assertTrue(region.get(g).size() > 0); this.count.incrementAndGet(); } catch (Exception e) { this.e = e; @@ -531,7 +533,7 @@ public class TestHRegion extends HBaseTe break; Delete delete = new Delete(results.get(0).getRow()); delete.deleteColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2")); - r.delete(delete, null, false); + r.delete(delete, false); results.clear(); } while (more); assertEquals("Did not perform correct number of deletes", 3, count); @@ -770,7 +772,6 @@ public class TestHRegion extends HBaseTe byte [] emptyVal = new byte[] {}; byte [] val1 = Bytes.toBytes("value1"); byte [] val2 = Bytes.toBytes("value2"); - Integer lockId = null; //Setting up region String method = this.getName(); @@ -782,7 +783,7 @@ public class TestHRegion extends HBaseTe //checkAndPut with empty value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), put, lockId, true); + new BinaryComparator(emptyVal), put, true); assertTrue(res); //Putting data in key @@ -791,25 +792,25 @@ public class TestHRegion extends HBaseTe //checkAndPut with correct value res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), put, lockId, true); + new BinaryComparator(emptyVal), put, true); assertTrue(res); // not empty anymore res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), put, lockId, true); + new BinaryComparator(emptyVal), put, true); assertFalse(res); Delete delete = new Delete(row1); delete.deleteColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), delete, lockId, true); + new BinaryComparator(emptyVal), delete, true); assertFalse(res); put = new Put(row1); put.add(fam1, qf1, val2); //checkAndPut with correct value res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val1), put, lockId, true); + new BinaryComparator(val1), put, true); assertTrue(res); //checkAndDelete with correct value @@ -817,12 +818,12 @@ public class TestHRegion extends HBaseTe delete.deleteColumn(fam1, qf1); delete.deleteColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val2), delete, lockId, true); + new BinaryComparator(val2), delete, true); assertTrue(res); delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), delete, lockId, true); + new BinaryComparator(emptyVal), delete, true); assertTrue(res); //checkAndPut looking for a null value @@ -830,7 +831,7 @@ public class TestHRegion extends HBaseTe put.add(fam1, qf1, val1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new NullComparator(), put, lockId, true); + new NullComparator(), put, true); assertTrue(res); } finally { 
HRegion.closeHRegion(this.region); @@ -845,7 +846,6 @@ public class TestHRegion extends HBaseTe byte [] qf1 = Bytes.toBytes("qualifier"); byte [] val1 = Bytes.toBytes("value1"); byte [] val2 = Bytes.toBytes("value2"); - Integer lockId = null; //Setting up region String method = this.getName(); @@ -858,14 +858,14 @@ public class TestHRegion extends HBaseTe //checkAndPut with wrong value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val2), put, lockId, true); + new BinaryComparator(val2), put, true); assertEquals(false, res); //checkAndDelete with wrong value Delete delete = new Delete(row1); delete.deleteFamily(fam1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val2), delete, lockId, true); + new BinaryComparator(val2), delete, true); assertEquals(false, res); } finally { HRegion.closeHRegion(this.region); @@ -879,7 +879,6 @@ public class TestHRegion extends HBaseTe byte [] fam1 = Bytes.toBytes("fam1"); byte [] qf1 = Bytes.toBytes("qualifier"); byte [] val1 = Bytes.toBytes("value1"); - Integer lockId = null; //Setting up region String method = this.getName(); @@ -892,14 +891,14 @@ public class TestHRegion extends HBaseTe //checkAndPut with correct value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val1), put, lockId, true); + new BinaryComparator(val1), put, true); assertEquals(true, res); //checkAndDelete with correct value Delete delete = new Delete(row1); delete.deleteColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val1), put, lockId, true); + new BinaryComparator(val1), put, true); assertEquals(true, res); } finally { HRegion.closeHRegion(this.region); @@ -915,7 +914,6 @@ public class TestHRegion extends HBaseTe byte [] qf1 = Bytes.toBytes("qualifier"); byte [] val1 = Bytes.toBytes("value1"); byte [] val2 = Bytes.toBytes("value2"); - Integer lockId = null; byte [][] families = {fam1, fam2}; @@ -939,13 +937,13 @@ public class TestHRegion extends HBaseTe store.memstore.kvset.size(); boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val1), put, lockId, true); + new BinaryComparator(val1), put, true); assertEquals(true, res); store.memstore.kvset.size(); Get get = new Get(row1); get.addColumn(fam2, qf1); - KeyValue [] actual = region.get(get, null).raw(); + KeyValue [] actual = region.get(get).raw(); KeyValue [] expected = {kv}; @@ -966,7 +964,7 @@ public class TestHRegion extends HBaseTe put.add(fam1, qual1, value1); try { boolean res = region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL, - new BinaryComparator(value2), put, null, false); + new BinaryComparator(value2), put, false); fail(); } catch (DoNotRetryIOException expected) { // expected exception. 
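As with append, increment, and get earlier in this commit, every checkAndMutate call in these hunks loses its Integer lockId argument (the now-unused lockId local is deleted as well). A minimal sketch of the new call shape, reusing the byte[] constants these tests declare:

    Put put = new Put(row1);
    put.add(fam1, qf1, val1);
    boolean ok = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val1), put, true);  // trailing flag unchanged from the old signature
    assertTrue(ok);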
@@ -989,7 +987,6 @@ public class TestHRegion extends HBaseTe byte [] val2 = Bytes.toBytes("value2"); byte [] val3 = Bytes.toBytes("value3"); byte[] emptyVal = new byte[] { }; - Integer lockId = null; byte [][] families = {fam1, fam2}; @@ -1017,14 +1014,14 @@ public class TestHRegion extends HBaseTe delete.deleteColumn(fam2, qf1); delete.deleteColumn(fam1, qf3); boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val2), delete, lockId, true); + new BinaryComparator(val2), delete, true); assertEquals(true, res); Get get = new Get(row1); get.addColumn(fam1, qf1); get.addColumn(fam1, qf3); get.addColumn(fam2, qf2); - Result r = region.get(get, null); + Result r = region.get(get); assertEquals(2, r.size()); assertEquals(val1, r.getValue(fam1, qf1)); assertEquals(val2, r.getValue(fam2, qf2)); @@ -1033,21 +1030,21 @@ public class TestHRegion extends HBaseTe delete = new Delete(row1); delete.deleteFamily(fam2); res = region.checkAndMutate(row1, fam2, qf1, CompareOp.EQUAL, - new BinaryComparator(emptyVal), delete, lockId, true); + new BinaryComparator(emptyVal), delete, true); assertEquals(true, res); get = new Get(row1); - r = region.get(get, null); + r = region.get(get); assertEquals(1, r.size()); assertEquals(val1, r.getValue(fam1, qf1)); //Row delete delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL, - new BinaryComparator(val1), delete, lockId, true); + new BinaryComparator(val1), delete, true); assertEquals(true, res); get = new Get(row1); - r = region.get(get, null); + r = region.get(get); assertEquals(0, r.size()); } finally { HRegion.closeHRegion(this.region); @@ -1078,11 +1075,11 @@ public class TestHRegion extends HBaseTe Delete delete = new Delete(row1); delete.deleteColumn(fam1, qual); delete.deleteColumn(fam1, qual); - region.delete(delete, null, false); + region.delete(delete, false); Get get = new Get(row1); get.addFamily(fam1); - Result r = region.get(get, null); + Result r = region.get(get); assertEquals(0, r.size()); } finally { HRegion.closeHRegion(this.region); @@ -1164,19 +1161,19 @@ public class TestHRegion extends HBaseTe // ok now delete a split: Delete delete = new Delete(row); delete.deleteColumns(fam, splitA); - region.delete(delete, null, true); + region.delete(delete, true); // assert some things: Get get = new Get(row).addColumn(fam, serverinfo); - Result result = region.get(get, null); + Result result = region.get(get); assertEquals(1, result.size()); get = new Get(row).addColumn(fam, splitA); - result = region.get(get, null); + result = region.get(get); assertEquals(0, result.size()); get = new Get(row).addColumn(fam, splitB); - result = region.get(get, null); + result = region.get(get); assertEquals(1, result.size()); // Assert that after a delete, I can put. @@ -1184,16 +1181,16 @@ public class TestHRegion extends HBaseTe put.add(fam, splitA, Bytes.toBytes("reference_A")); region.put(put); get = new Get(row); - result = region.get(get, null); + result = region.get(get); assertEquals(3, result.size()); // Now delete all... 
then test I can add stuff back delete = new Delete(row); - region.delete(delete, null, false); - assertEquals(0, region.get(get, null).size()); + region.delete(delete, false); + assertEquals(0, region.get(get).size()); region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A"))); - result = region.get(get, null); + result = region.get(get); assertEquals(1, result.size()); } finally { HRegion.closeHRegion(this.region); @@ -1219,20 +1216,20 @@ public class TestHRegion extends HBaseTe // now delete something in the present Delete delete = new Delete(row); - region.delete(delete, null, true); + region.delete(delete, true); // make sure we still see our data Get get = new Get(row).addColumn(fam, serverinfo); - Result result = region.get(get, null); + Result result = region.get(get); assertEquals(1, result.size()); // delete the future row - delete = new Delete(row,HConstants.LATEST_TIMESTAMP-3,null); - region.delete(delete, null, true); + delete = new Delete(row,HConstants.LATEST_TIMESTAMP-3); + region.delete(delete, true); // make sure it is gone get = new Get(row).addColumn(fam, serverinfo); - result = region.get(get, null); + result = region.get(get); assertEquals(0, result.size()); } finally { HRegion.closeHRegion(this.region); @@ -1262,7 +1259,7 @@ public class TestHRegion extends HBaseTe // Make sure it shows up with an actual timestamp Get get = new Get(row).addColumn(fam, qual); - Result result = region.get(get, null); + Result result = region.get(get); assertEquals(1, result.size()); KeyValue kv = result.raw()[0]; LOG.info("Got: " + kv); @@ -1278,7 +1275,7 @@ public class TestHRegion extends HBaseTe // Make sure it shows up with an actual timestamp get = new Get(row).addColumn(fam, qual); - result = region.get(get, null); + result = region.get(get); assertEquals(1, result.size()); kv = result.raw()[0]; LOG.info("Got: " + kv); @@ -1343,7 +1340,7 @@ public class TestHRegion extends HBaseTe Delete delete = new Delete(rowA); delete.deleteFamily(fam1); - region.delete(delete, null, true); + region.delete(delete, true); // now create data. Put put = new Put(rowA); @@ -1394,7 +1391,7 @@ public class TestHRegion extends HBaseTe region.put(put); // now delete the value: - region.delete(delete, null, true); + region.delete(delete, true); // ok put data: @@ -1406,7 +1403,7 @@ public class TestHRegion extends HBaseTe Get get = new Get(row); get.addColumn(fam1, qual1); - Result r = region.get(get, null); + Result r = region.get(get); assertEquals(1, r.size()); assertByteEquals(value2, r.getValue(fam1, qual1)); @@ -1486,7 +1483,7 @@ public class TestHRegion extends HBaseTe //Test try { - region.get(get, null); + region.get(get); } catch (DoNotRetryIOException e) { assertFalse(false); return; @@ -1530,7 +1527,7 @@ public class TestHRegion extends HBaseTe KeyValue [] expected = {kv1, kv2}; //Test - Result res = region.get(get, null); + Result res = region.get(get); assertEquals(expected.length, res.size()); for(int i=0; ithreads = new ArrayList(threadCount); - for (int i = 0; i < threadCount; i++) { - threads.add(new Thread(Integer.toString(i)) { - @Override - public void run() { - Integer [] lockids = new Integer[lockCount]; - // Get locks. 
- for (int i = 0; i < lockCount; i++) { - try { - byte [] rowid = Bytes.toBytes(Integer.toString(i)); - lockids[i] = region.obtainRowLock(rowid); - assertEquals(rowid, region.getRowFromLock(lockids[i])); - LOG.debug(getName() + " locked " + Bytes.toString(rowid)); - } catch (IOException e) { - e.printStackTrace(); - } - } - LOG.debug(getName() + " set " + - Integer.toString(lockCount) + " locks"); - - // Abort outstanding locks. - for (int i = lockCount - 1; i >= 0; i--) { - region.releaseRowLock(lockids[i]); - LOG.debug(getName() + " unlocked " + i); - } - LOG.debug(getName() + " released " + - Integer.toString(lockCount) + " locks"); - } - }); - } - - // Startup all our threads. - for (Thread t : threads) { - t.start(); - } - - // Now wait around till all are done. - for (Thread t: threads) { - while (t.isAlive()) { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - // Go around again. - } - } - } - LOG.info("locks completed."); - } finally { - HRegion.closeHRegion(this.region); - this.region = null; - } - } - - ////////////////////////////////////////////////////////////////////////////// // Merge test ////////////////////////////////////////////////////////////////////////////// public void testMerge() throws IOException { @@ -1982,8 +1911,8 @@ public class TestHRegion extends HBaseTe res = new ArrayList(); is.next(res); - for(int i=0; i(); is.next(res); for(int i=0; i results = new ArrayList(); + assertTrue(s.next(results)); + assertEquals(results.size(), 1); + results.clear(); + + assertTrue(s.next(results)); + assertEquals(results.size(), 3); + assertTrue("orderCheck", results.get(0).matchingFamily(cf_alpha)); + assertTrue("orderCheck", results.get(1).matchingFamily(cf_essential)); + assertTrue("orderCheck", results.get(2).matchingFamily(cf_joined)); + results.clear(); + + assertFalse(s.next(results)); + assertEquals(results.size(), 0); + } finally { + HRegion.closeHRegion(this.region); + this.region = null; + } + } + + /** + * HBASE-5416 + * + * Test case when scan limits amount of KVs returned on each next() call. 
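+ * A sketch of the contract exercised below, assuming an open InternalScanner
+ * s obtained from region.getScanner(scan): each call to s.next(results, 3)
+ * appends at most three KeyValues to results, so a row carrying four columns
+ * arrives as a batch of three followed by a batch of one -- the shape that
+ * the index/modulo assertions at the bottom of this test verify.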
+  /**
+   * HBASE-5416
+   *
+   * Test case when scan limits amount of KVs returned on each next() call.
+   */
+  public void testScanner_JoinedScannersWithLimits() throws IOException {
+    final byte [] tableName = Bytes.toBytes("testTable");
+    final byte [] cf_first = Bytes.toBytes("first");
+    final byte [] cf_second = Bytes.toBytes("second");
+
+    this.region = initHRegion(tableName, getName(), conf, cf_first, cf_second);
+    try {
+      final byte [] col_a = Bytes.toBytes("a");
+      final byte [] col_b = Bytes.toBytes("b");
+
+      Put put;
+
+      for (int i = 0; i < 10; i++) {
+        put = new Put(Bytes.toBytes("r" + Integer.toString(i)));
+        put.add(cf_first, col_a, Bytes.toBytes(i));
+        if (i < 5) {
+          put.add(cf_first, col_b, Bytes.toBytes(i));
+          put.add(cf_second, col_a, Bytes.toBytes(i));
+          put.add(cf_second, col_b, Bytes.toBytes(i));
+        }
+        region.put(put);
+      }
+
+      Scan scan = new Scan();
+      scan.setLoadColumnFamiliesOnDemand(true);
+      Filter bogusFilter = new FilterBase() {
+        @Override
+        public boolean isFamilyEssential(byte[] name) {
+          return Bytes.equals(name, cf_first);
+        }
+      };
+
+      scan.setFilter(bogusFilter);
+      InternalScanner s = region.getScanner(scan);
+
+      // Our data looks like this:
+      // r0: first:a, first:b, second:a, second:b
+      // r1: first:a, first:b, second:a, second:b
+      // r2: first:a, first:b, second:a, second:b
+      // r3: first:a, first:b, second:a, second:b
+      // r4: first:a, first:b, second:a, second:b
+      // r5: first:a
+      // r6: first:a
+      // r7: first:a
+      // r8: first:a
+      // r9: first:a
+
+      // But due to next's limit set to 3, we should get this:
+      // r0: first:a, first:b, second:a
+      // r0: second:b
+      // r1: first:a, first:b, second:a
+      // r1: second:b
+      // r2: first:a, first:b, second:a
+      // r2: second:b
+      // r3: first:a, first:b, second:a
+      // r3: second:b
+      // r4: first:a, first:b, second:a
+      // r4: second:b
+      // r5: first:a
+      // r6: first:a
+      // r7: first:a
+      // r8: first:a
+      // r9: first:a
+
+      List<KeyValue> results = new ArrayList<KeyValue>();
+      int index = 0;
+      while (true) {
+        boolean more = s.next(results, 3);
+        if ((index >> 1) < 5) {
+          if (index % 2 == 0)
+            assertEquals(results.size(), 3);
+          else
+            assertEquals(results.size(), 1);
+        }
+        else
+          assertEquals(results.size(), 1);
+        results.clear();
+        index++;
+        if (!more) break;
      }
    } finally {
      HRegion.closeHRegion(this.region);
@@ -2890,8 +2979,8 @@ public class TestHRegion extends HBaseTe
        numPutsFinished++;
        if (numPutsFinished > 0 && numPutsFinished % 47 == 0) {
          System.out.println("put iteration = " + numPutsFinished);
-         Delete delete = new Delete(row, (long)numPutsFinished-30, null);
-         region.delete(delete, null, true);
+         Delete delete = new Delete(row, (long)numPutsFinished-30);
+         region.delete(delete, true);
        }
        numPutsFinished++;
      }
@@ -2976,7 +3065,7 @@ public class TestHRegion extends HBaseTe
      for (int i = 0; i < testCount; i++) {
        boolean previousEmpty = result == null || result.isEmpty();
-       result = region.get(get, null);
+       result = region.get(get);
        if (!result.isEmpty() || !previousEmpty || i > compactInterval) {
          assertEquals("i=" + i, expectedCount, result.size());
          // TODO this was removed, now what dangit?!
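The limit overload exercised by testScanner_JoinedScannersWithLimits above caps how many KeyValues a single next() call may return, which is why one wide row (r0..r4) is delivered across two calls in the expected output. A consumer therefore must not equate one call with one row; a sketch of a draining loop under that assumption, reusing the scanner s from the test:

    List<KeyValue> batch = new ArrayList<KeyValue>();
    boolean more = true;
    while (more) {
      more = s.next(batch, 3);  // at most 3 KVs per call; a row may span calls
      for (KeyValue kv : batch) {
        // consume kv here; a row is only complete once a later call
        // moves on to a different row key
      }
      batch.clear();  // next() appends, so clear between calls
    }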
@@ -3035,14 +3124,14 @@ public class TestHRegion extends HBaseTe
      byte[] rowNotServed = Bytes.toBytes("a");
      Get g = new Get(rowNotServed);
      try {
-       region.get(g, null);
+       region.get(g);
        fail();
      } catch (WrongRegionException x) {
        // OK
      }
      byte[] row = Bytes.toBytes("y");
      g = new Get(row);
-     region.get(g, null);
+     region.get(g);
    } finally {
      HRegion.closeHRegion(this.region);
      this.region = null;
@@ -3063,9 +3152,9 @@ public class TestHRegion extends HBaseTe
      region.flushcache();
 
-     Delete delete = new Delete(Bytes.toBytes(1L), 1L, null);
+     Delete delete = new Delete(Bytes.toBytes(1L), 1L);
      //delete.deleteColumn(family, qual1);
-     region.delete(delete, null, true);
+     region.delete(delete, true);
 
      put = new Put(Bytes.toBytes(2L));
      put.add(family, qual1, 2L, Bytes.toBytes(2L));
@@ -3189,7 +3278,7 @@ public class TestHRegion extends HBaseTe
      //Get rows
      Get get = new Get(row);
      get.setMaxVersions();
-     KeyValue[] kvs = region.get(get, null).raw();
+     KeyValue[] kvs = region.get(get).raw();
 
      //Check if rows are correct
      assertEquals(4, kvs.length);
@@ -3233,14 +3322,14 @@ public class TestHRegion extends HBaseTe
      region.flushcache();
 
      Delete del = new Delete(row);
-     region.delete(del, null, true);
+     region.delete(del, true);
      region.flushcache();
 
      // Get remaining rows (should have none)
      Get get = new Get(row);
      get.addColumn(familyName, col);
-     KeyValue[] keyValues = region.get(get, null).raw();
+     KeyValue[] keyValues = region.get(get).raw();
      assertTrue(keyValues.length == 0);
    } finally {
      HRegion.closeHRegion(this.region);
@@ -3345,7 +3434,55 @@ public class TestHRegion extends HBaseTe
      HRegion.closeHRegion(region);
    }
  }
-
+
+  /**
+   * Verifies that the .regioninfo file is written on region creation
+   * and that it is recreated if missing during region opening.
+   */
+  public void testRegionInfoFileCreation() throws IOException {
+    Path rootDir = new Path(DIR + "testRegionInfoFileCreation");
+    Configuration conf = HBaseConfiguration.create(this.conf);
+
+    HTableDescriptor htd = new HTableDescriptor("testtb");
+    htd.addFamily(new HColumnDescriptor("cf"));
+
+    HRegionInfo hri = new HRegionInfo(htd.getName());
+
+    // Create a region and skip the initialization (like CreateTableHandler)
+    HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true);
+    Path regionDir = region.getRegionDir();
+    FileSystem fs = region.getFilesystem();
+    HRegion.closeHRegion(region);
+
+    Path regionInfoFile = new Path(regionDir, HRegion.REGIONINFO_FILE);
+
+    // Verify that the .regioninfo file is present
+    assertTrue(HRegion.REGIONINFO_FILE + " should be present in the region dir",
+      fs.exists(regionInfoFile));
+
+    // Try to open the region
+    region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
+    assertEquals(regionDir, region.getRegionDir());
+    HRegion.closeHRegion(region);
+
+    // Verify that the .regioninfo file is still there
+    assertTrue(HRegion.REGIONINFO_FILE + " should be present in the region dir",
+      fs.exists(regionInfoFile));
+
+    // Remove the .regioninfo file and verify it is recreated on region open
+    fs.delete(regionInfoFile);
+    assertFalse(HRegion.REGIONINFO_FILE + " should be removed from the region dir",
+      fs.exists(regionInfoFile));
+
+    region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
+    assertEquals(regionDir, region.getRegionDir());
+    HRegion.closeHRegion(region);
+
+    // Verify that the .regioninfo file is still there
+    assertTrue(HRegion.REGIONINFO_FILE + " should be present in the region dir",
+      fs.exists(new Path(regionDir, HRegion.REGIONINFO_FILE)));
+  }
+
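The new test encodes a recovery contract: HRegion.openHRegion() is expected to rewrite a missing .regioninfo file rather than fail. Condensed to its essence, using only calls that appear in the test itself (rootDir, hri, htd, and conf as set up above):

    Path regionInfoFile = new Path(region.getRegionDir(), HRegion.REGIONINFO_FILE);
    FileSystem fs = region.getFilesystem();
    fs.delete(regionInfoFile);  // simulate a lost .regioninfo
    region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
    assertTrue(fs.exists(regionInfoFile));  // opening the region recreated it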
  /**
   * TestCase for increment
   *
@@ -3371,7 +3508,7 @@ public class TestHRegion extends HBaseTe
          inc.addColumn(family, qualifier, ONE);
          count++;
          try {
-           region.increment(inc, null, true);
+           region.increment(inc, true);
          } catch (IOException e) {
            e.printStackTrace();
            break;
@@ -3426,7 +3563,7 @@ public class TestHRegion extends HBaseTe
      Get get = new Get(Incrementer.incRow);
      get.addColumn(Incrementer.family, Incrementer.qualifier);
      get.setMaxVersions(1);
-     Result res = this.region.get(get, null);
+     Result res = this.region.get(get);
      List<KeyValue> kvs = res.getColumn(Incrementer.family,
        Incrementer.qualifier);
@@ -3462,7 +3599,7 @@ public class TestHRegion extends HBaseTe
          app.add(family, qualifier, CHAR);
          count++;
          try {
-           region.append(app, null, true);
+           region.append(app, true);
          } catch (IOException e) {
            e.printStackTrace();
            break;
@@ -3520,7 +3657,7 @@ public class TestHRegion extends HBaseTe
      Get get = new Get(Appender.appendRow);
      get.addColumn(Appender.family, Appender.qualifier);
      get.setMaxVersions(1);
-     Result res = this.region.get(get, null);
+     Result res = this.region.get(get);
      List<KeyValue> kvs = res.getColumn(Appender.family,
        Appender.qualifier);
@@ -3557,7 +3694,7 @@ public class TestHRegion extends HBaseTe
      get = new Get(row);
      get.addColumn(family, qualifier);
      get.setMaxVersions();
-     res = this.region.get(get, null);
+     res = this.region.get(get);
      kvs = res.getColumn(family, qualifier);
      assertEquals(1, kvs.size());
      assertEquals(Bytes.toBytes("value0"), kvs.get(0).getValue());
@@ -3566,7 +3703,7 @@ public class TestHRegion extends HBaseTe
      get = new Get(row);
      get.addColumn(family, qualifier);
      get.setMaxVersions();
-     res = this.region.get(get, null);
+     res = this.region.get(get);
      kvs = res.getColumn(family, qualifier);
      assertEquals(1, kvs.size());
      assertEquals(Bytes.toBytes("value0"), kvs.get(0).getValue());
@@ -3578,7 +3715,7 @@ public class TestHRegion extends HBaseTe
      get = new Get(row);
      get.addColumn(family, qualifier);
      get.setMaxVersions();
-     res = this.region.get(get, null);
+     res = this.region.get(get);
      kvs = res.getColumn(family, qualifier);
      assertEquals(1, kvs.size());
      assertEquals(Bytes.toBytes("value1"), kvs.get(0).getValue());
@@ -3587,7 +3724,7 @@ public class TestHRegion extends HBaseTe
      get = new Get(row);
      get.addColumn(family, qualifier);
      get.setMaxVersions();
-     res = this.region.get(get, null);
+     res = this.region.get(get);
      kvs = res.getColumn(family, qualifier);
      assertEquals(1, kvs.size());
      assertEquals(Bytes.toBytes("value1"), kvs.get(0).getValue());
@@ -3615,7 +3752,7 @@ public class TestHRegion extends HBaseTe
      for(byte [] family : families) {
        get.addColumn(family, qf);
      }
-     Result result = newReg.get(get, null);
+     Result result = newReg.get(get);
      KeyValue [] raw = result.raw();
      assertEquals(families.length, result.size());
      for(int j=0; j